index
int64
0
0
repo_id
stringlengths
48
65
file_path
stringlengths
62
122
content
stringlengths
27
3.15M
__index_level_0__
int64
0
10k
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/main_classes/text_generation.mdx-5e23a84f.js
import{S as Um,i as Vm,s as Km,e as s,k as l,w as m,t,M as Zm,c as a,d as o,m as d,a as r,x as g,h as n,b as c,F as e,g as w,y as _,q as u,o as h,B as f}from"../../chunks/vendor-4833417e.js";import{T as Rm}from"../../chunks/Tip-fffd6df1.js";import{D as be}from"../../chunks/Docstring-4f315ed9.js";import{C as ye}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as Cs}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function Xm(it){let p,P,j,T,I,E,xe,W,G,B,v;return{c(){p=s("p"),P=t("Apart from "),j=s("code"),T=t("inputs"),I=t(`, all the arguments below will default to the value of the attribute of the same name as defined in the model\u2019s config (`),E=s("code"),xe=t("config.json"),W=t(`) which in turn defaults to the `),G=s("a"),B=t("PretrainedConfig"),v=t(" of the model."),this.h()},l($){p=a($,"P",{});var b=r(p);P=n(b,"Apart from "),j=a(b,"CODE",{});var Me=r(j);T=n(Me,"inputs"),Me.forEach(o),I=n(b,`, all the arguments below will default to the value of the attribute of the same name as defined in the model\u2019s config (`),E=a(b,"CODE",{});var Le=r(E);xe=n(Le,"config.json"),Le.forEach(o),W=n(b,`) which in turn defaults to the `),G=a(b,"A",{href:!0});var H=r(G);B=n(H,"PretrainedConfig"),H.forEach(o),v=n(b," of the model."),b.forEach(o),this.h()},h(){c(G,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig")},m($,b){w($,p,b),e(p,P),e(p,j),e(j,T),e(p,I),e(p,E),e(E,xe),e(p,W),e(p,G),e(G,B),e(p,v)},d($){$&&o(p)}}}function Jm(it){let p,P,j,T,I,E,xe,W,G,B,v;return{c(){p=s("p"),P=t("Apart from "),j=s("code"),T=t("inputs"),I=t(`, all the arguments below will default to the value of the attribute of the same name as defined in the model\u2019s config (`),E=s("code"),xe=t("config.json"),W=t(`) which in turn defaults to the `),G=s("a"),B=t("PretrainedConfig"),v=t(" of the model."),this.h()},l($){p=a($,"P",{});var b=r(p);P=n(b,"Apart from "),j=a(b,"CODE",{});var 
Me=r(j);T=n(Me,"inputs"),Me.forEach(o),I=n(b,`, all the arguments below will default to the value of the attribute of the same name as defined in the model\u2019s config (`),E=a(b,"CODE",{});var Le=r(E);xe=n(Le,"config.json"),Le.forEach(o),W=n(b,`) which in turn defaults to the `),G=a(b,"A",{href:!0});var H=r(G);B=n(H,"PretrainedConfig"),H.forEach(o),v=n(b," of the model."),b.forEach(o),this.h()},h(){c(G,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig")},m($,b){w($,p,b),e(p,P),e(p,j),e(j,T),e(p,I),e(p,E),e(E,xe),e(p,W),e(p,G),e(G,B),e(p,v)},d($){$&&o(p)}}}function Qm(it){let p,P,j,T,I,E,xe,W,G,B,v,$,b,Me,Le,H,Is,Ws,en,Bs,Hs,tn,Rs,Us,nn,Vs,Ks,on,Zs,Xs,_s,S,Js,On,Qs,Ys,qn,ea,ta,sn,na,oa,an,sa,aa,rn,ra,ia,us,we,Ze,Gn,lt,la,Sn,da,hs,k,dt,ca,ct,pa,ln,ma,ga,_a,pt,ua,dn,ha,fa,ba,F,R,$n,xa,ka,cn,va,ya,Fn,ja,Ma,An,La,wa,Ta,U,zn,Ea,Oa,pn,qa,Ga,Pn,Sa,$a,Nn,Fa,Aa,za,V,Dn,Pa,Na,mn,Da,Ca,Cn,Ia,Wa,In,Ba,Ha,Ra,K,Wn,Ua,Va,gn,Ka,Za,Bn,Xa,Ja,Hn,Qa,Ya,er,Z,Rn,tr,nr,_n,or,sr,Un,ar,rr,Vn,ir,lr,dr,X,Kn,cr,pr,un,mr,gr,Zn,_r,ur,Xn,hr,fr,br,x,mt,xr,Jn,kr,vr,A,J,Qn,yr,jr,hn,Mr,Lr,Yn,wr,Tr,eo,Er,Or,qr,Q,to,Gr,Sr,fn,$r,Fr,no,Ar,zr,oo,Pr,Nr,Dr,Y,so,Cr,Ir,bn,Wr,Br,ao,Hr,Rr,ro,Ur,Vr,Kr,ee,io,Zr,Xr,xn,Jr,Qr,lo,Yr,ei,co,ti,ni,oi,te,po,si,ai,kn,ri,ii,mo,li,di,go,ci,pi,mi,ne,_o,gi,_i,vn,ui,hi,uo,fi,bi,ho,xi,ki,vi,Xe,yi,gt,ji,_t,Mi,Li,wi,fo,Ti,Ei,bo,Oi,qi,ut,Gi,xo,Si,$i,ht,Fi,ko,Ai,zi,ft,Pi,oe,bt,Ni,xt,Di,vo,Ci,Ii,Wi,yo,Bi,Hi,kt,Ri,se,vt,Ui,yt,Vi,jo,Ki,Zi,Xi,Mo,Ji,Qi,jt,Yi,ae,Mt,el,Lt,tl,Lo,nl,ol,sl,wo,al,rl,wt,il,re,Tt,ll,Et,dl,To,cl,pl,ml,Eo,gl,_l,Ot,ul,ie,qt,hl,Gt,fl,Oo,bl,xl,kl,qo,vl,yl,St,jl,le,$t,Ml,Ft,Ll,Go,wl,Tl,El,So,Ol,ql,At,fs,Te,Je,$o,zt,Gl,Fo,Sl,bs,ke,Pt,$l,Nt,Fl,yn,Al,zl,Pl,O,Dt,Nl,Ao,Dl,Cl,Ct,Il,It,Wl,Bl,Hl,ve,Rl,zo,Ul,Vl,Po,Kl,Zl,jn,Xl,Jl,Ql,Wt,Yl,Bt,ed,td,nd,No,od,sd,Ht,xs,Ee,Qe,Do,Rt,ad,Co,rd,ks,z,Ut,id,Vt,ld,Mn,dd,cd,pd,Kt,md,Ln,gd,_d,ud,Oe,de,Io,hd,fd,Wo,bd,xd,Bo,kd,vd,Ho,yd,jd,Md,ce,Ro,Ld,wd,Uo,Td,Ed,Vo,Od,qd,Ko,Gd,Sd,$d,pe,Zo,Fd,Ad,X
o,zd,Pd,Jo,Nd,Dd,Qo,Cd,Id,Wd,q,Zt,Bd,Yo,Hd,Rd,qe,me,es,Ud,Vd,ts,Kd,Zd,ns,Xd,Jd,os,Qd,Yd,ec,ge,ss,tc,nc,as,oc,sc,rs,ac,rc,is,ic,lc,dc,_e,ls,cc,pc,ds,mc,gc,cs,_c,uc,ps,hc,fc,bc,Ye,xc,Xt,kc,Jt,vc,yc,jc,ms,Mc,Lc,Qt,vs;return E=new Cs({}),lt=new Cs({}),dt=new be({props:{name:"class transformers.generation_utils.GenerationMixin",anchor:"transformers.generation_utils.GenerationMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L379"}}),mt=new be({props:{name:"generate",anchor:"transformers.generation_utils.GenerationMixin.generate",parameters:[{name:"inputs",val:": typing.Optional[torch.Tensor] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"min_length",val:": typing.Optional[int] = None"},{name:"do_sample",val:": typing.Optional[bool] = None"},{name:"early_stopping",val:": typing.Optional[bool] = None"},{name:"num_beams",val:": typing.Optional[int] = None"},{name:"temperature",val:": typing.Optional[float] = None"},{name:"top_k",val:": typing.Optional[int] = None"},{name:"top_p",val:": typing.Optional[float] = None"},{name:"typical_p",val:": typing.Optional[float] = None"},{name:"repetition_penalty",val:": typing.Optional[float] = None"},{name:"bad_words_ids",val:": typing.Optional[typing.Iterable[int]] = None"},{name:"force_words_ids",val:": typing.Union[typing.Iterable[int], typing.Iterable[typing.Iterable[int]], NoneType] = None"},{name:"bos_token_id",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"length_penalty",val:": typing.Optional[float] = None"},{name:"no_repeat_ngram_size",val:": typing.Optional[int] = None"},{name:"encoder_no_repeat_ngram_size",val:": typing.Optional[int] = None"},{name:"num_return_sequences",val:": typing.Optional[int] = None"},{name:"max_time",val:": typing.Optional[float] = None"},{name:"max_new_tokens",val:": typing.Optional[int] = 
None"},{name:"decoder_start_token_id",val:": typing.Optional[int] = None"},{name:"use_cache",val:": typing.Optional[bool] = None"},{name:"num_beam_groups",val:": typing.Optional[int] = None"},{name:"diversity_penalty",val:": typing.Optional[float] = None"},{name:"prefix_allowed_tokens_fn",val:": typing.Union[typing.Callable[[int, torch.Tensor], typing.List[int]], NoneType] = None"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = []"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = []"},{name:"constraints",val:": typing.Optional[typing.List[transformers.generation_beam_constraints.Constraint]] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"forced_bos_token_id",val:": typing.Optional[int] = None"},{name:"forced_eos_token_id",val:": typing.Optional[int] = None"},{name:"remove_invalid_values",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = False"},{name:"exponential_decay_length_penalty",val:": typing.Union[typing.Tuple[typing.Union[int, float]], NoneType] = None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L832",parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.generate.inputs",description:`<strong>inputs</strong> (<code>torch.Tensor</code> of varying shape depending on the modality, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation or as model inputs to the encoder. If <code>None</code> the method initializes it with <code>bos_token_id</code> and a batch size of 1. 
For decoder-only models <code>inputs</code> should of in the format of <code>input_ids</code>. For encoder-decoder models <em>inputs</em> can represent any of <code>input_ids</code>, <code>input_values</code>, <code>input_features</code>, or <code>pixel_values</code>.`,name:"inputs"},{anchor:"transformers.generation_utils.GenerationMixin.generate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.max_length</code>) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.generate.max_new_tokens",description:`<strong>max_new_tokens</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; The maximum numbers of tokens to generate, ignore the current number of tokens. Use either <code>max_new_tokens</code> or <code>max_length</code> but not both, they serve the same purpose.`,name:"max_new_tokens"},{anchor:"transformers.generation_utils.GenerationMixin.generate.min_length",description:`<strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of the sequence to be generated.`,name:"min_length"},{anchor:"transformers.generation_utils.GenerationMixin.generate.do_sample",description:`<strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sampling ; use greedy decoding otherwise.`,name:"do_sample"},{anchor:"transformers.generation_utils.GenerationMixin.generate.early_stopping",description:`<strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.`,name:"early_stopping"},{anchor:"transformers.generation_utils.GenerationMixin.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, 
defaults to 1) &#x2014; Number of beams for beam search. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.generation_utils.GenerationMixin.generate.temperature",description:`<strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The value used to module the next token probabilities.`,name:"temperature"},{anchor:"transformers.generation_utils.GenerationMixin.generate.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.`,name:"top_k"},{anchor:"transformers.generation_utils.GenerationMixin.generate.top_p",description:`<strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.generation_utils.GenerationMixin.generate.repetition_penalty",description:`<strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. 
See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.`,name:"repetition_penalty"},{anchor:"transformers.generation_utils.GenerationMixin.generate.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.`,name:"bos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length. 1.0 means no penalty. 
Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.`,name:"length_penalty"},{anchor:"transformers.generation_utils.GenerationMixin.generate.no_repeat_ngram_size",description:`<strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.`,name:"no_repeat_ngram_size"},{anchor:"transformers.generation_utils.GenerationMixin.generate.encoder_no_repeat_ngram_size",description:`<strong>encoder_no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size that occur in the <code>encoder_input_ids</code> cannot occur in the <code>decoder_input_ids</code>.`,name:"encoder_no_repeat_ngram_size"},{anchor:"transformers.generation_utils.GenerationMixin.generate.bad_words_ids(List[List[int]],",description:`<strong>bad_words_ids(<code>List[List[int]]</code>,</strong> <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated. In order to get the token ids of the words that should not appear in the generated text, use <code>tokenizer(bad_words, add_prefix_space=True, add_special_tokens=False).input_ids</code>.`,name:"bad_words_ids(List[List[int]],"},{anchor:"transformers.generation_utils.GenerationMixin.generate.force_words_ids(List[List[int]]",description:`<strong>force_words_ids(<code>List[List[int]]</code></strong> or <code>List[List[List[int]]]</code>, <em>optional</em>) &#x2014; List of token ids that must be generated. If given a <code>List[List[int]]</code>, this is treated as a simple list of words that must be included, the opposite to <code>bad_words_ids</code>. 
If given <code>List[List[List[int]]]</code>, this triggers a <a href="https://github.com/huggingface/transformers/issues/14081" rel="nofollow">disjunctive constraint</a>, where one can allow different forms of each word.`,name:"force_words_ids(List[List[int]]"},{anchor:"transformers.generation_utils.GenerationMixin.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch.`,name:"num_return_sequences(int,"},{anchor:"transformers.generation_utils.GenerationMixin.generate.max_time(float,",description:`<strong>max_time(<code>float</code>,</strong> <em>optional</em>, defaults to None) &#x2014; The maximum amount of time you allow the computation to run for in seconds. generation will still finish the current pass after allocated time has been passed.`,name:"max_time(float,"},{anchor:"transformers.generation_utils.GenerationMixin.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values are in <code>[0, 1]</code>, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as <code>input_ids</code> that masks the pad token. <a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.generation_utils.GenerationMixin.generate.decoder_start_token_id",description:`<strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token. 
use_cache &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.`,name:"decoder_start_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.num_beam_groups",description:`<strong>num_beam_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beam_groups"},{anchor:"transformers.generation_utils.GenerationMixin.generate.diversity_penalty",description:`<strong>diversity_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; This value is subtracted from a beam&#x2019;s score if it generates a token same as any beam from other group at a particular time. Note that <code>diversity_penalty</code> is only effective if <code>group beam search</code> is enabled. prefix_allowed_tokens_fn &#x2014; (<code>Callable[[int, torch.Tensor], List[int]]</code>, <em>optional</em>): If provided, this function constraints the beam search to allowed tokens only at each step. If not provided no constraint is applied. This function takes 2 arguments: the batch ID <code>batch_id</code> and <code>input_ids</code>. It has to return a list with the allowed tokens for the next generation step conditioned on the batch ID <code>batch_id</code> and the previously generated tokens <code>inputs_ids</code>. 
This argument is useful for constrained generation conditioned on the prefix, as described in <a href="https://arxiv.org/abs/2010.00904" rel="nofollow">Autoregressive Entity Retrieval</a>.`,name:"diversity_penalty"},{anchor:"transformers.generation_utils.GenerationMixin.generate.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; Custom logits processors that complement the default logits processors built from arguments and a model&#x2019;s config. If a logit processor is passed that is already created with the arguments or a model&#x2019;s config an error is thrown. This feature is intended for advanced users.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.generate.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; Custom stopping criteria that complement the default stopping criteria built from arguments and a model&#x2019;s config. If a stopping criteria is passed that is already created with the arguments or a model&#x2019;s config an error is thrown. This feature is intended for advanced users.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.generate.constraints",description:`<strong>constraints</strong> (<code>List[Constraint]</code>, <em>optional</em>) &#x2014; Custom constraints that can be added to the generation to ensure that the output will contain the use of certain tokens as defined by <code>Constraint</code> objects, in the most sensible way possible.`,name:"constraints"},{anchor:"transformers.generation_utils.GenerationMixin.generate.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.generate.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.generate.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.generate.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.generate.forced_bos_token_id",description:`<strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. 
Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.`,name:"forced_bos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.`,name:"forced_eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.generate.remove_invalid_values",description:`<strong>remove_invalid_values</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to remove possible <em>nan</em> and <em>inf</em> outputs of the model to prevent the generation method to crash. Note that using <code>remove_invalid_values</code> can slow down generation.`,name:"remove_invalid_values"},{anchor:"transformers.generation_utils.GenerationMixin.generate.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3)`,name:"synced_gpus"},{anchor:"transformers.generation_utils.GenerationMixin.generate.exponential_decay_length_penalty",description:`<strong>exponential_decay_length_penalty</strong> (<code>tuple(int, float)</code>, <em>optional</em>) &#x2014; This Tuple adds an exponentially increasing length penalty, after a certain amount of tokens have been generated. The tuple shall consist of: <code>(start_index, decay_factor)</code> where <code>start_index</code> indicates where penalty starts and <code>decay_factor</code> represents the factor of exponential decay</p> <p>model<em>kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder</em>*.`,name:"exponential_decay_length_penalty"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> (if <code>return_dict_in_generate=True</code> or when <code>config.return_dict_in_generate=True</code>) or a <code>torch.FloatTensor</code>.</p> <p>If the model is <em>not</em> an encoder-decoder model (<code>model.config.is_encoder_decoder=False</code>), the possible <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput" >GreedySearchDecoderOnlyOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.SampleDecoderOnlyOutput" >SampleDecoderOnlyOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSampleDecoderOnlyOutput" >BeamSampleDecoderOnlyOutput</a></li> </ul> <p>If the model is an encoder-decoder model (<code>model.config.is_encoder_decoder=True</code>), the possible <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.GreedySearchEncoderDecoderOutput" >GreedySearchEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.SampleEncoderDecoderOutput" 
>SampleEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSampleEncoderDecoderOutput" >BeamSampleEncoderDecoderOutput</a></li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> or <code>torch.LongTensor</code></p> `}}),Xe=new Rm({props:{warning:"&lcub;true}",$$slots:{default:[Xm]},$$scope:{ctx:it}}}),ut=new ye({props:{code:`from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") prompt = "Today I believe we can finally" input_ids = tokenizer(prompt, return_tensors="pt").input_ids # generate up to 30 tokens outputs = model.generate(input_ids, do_sample=False, max_length=30) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Today I believe we can finally&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generate up to 30 tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = 
model.generate(input_ids, do_sample=<span class="hljs-literal">False</span>, max_length=<span class="hljs-number">30</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today I believe we can finally get to the point where we can make a difference in the lives of the people of the United States of America.\\n&#x27;</span>]`}}),ht=new ye({props:{code:`from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") prompt = "Today I believe we can finally" input_ids = tokenizer(prompt, return_tensors="pt").input_ids # sample up to 30 tokens torch.manual_seed(0) outputs = model.generate(input_ids, do_sample=True, max_length=30) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Today I believe we can finally&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># sample up to 30 tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>outputs = model.generate(input_ids, do_sample=<span class="hljs-literal">True</span>, max_length=<span class="hljs-number">30</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today I believe we can finally get rid of discrimination,&quot; said Rep. Mark Pocan (D-Wis.).\\n\\n&quot;Just look at the&#x27;</span>]`}}),ft=new ye({props:{code:`from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-de") sentence = "Paris is one of the densest populated areas in Europe." input_ids = tokenizer(sentence, return_tensors="pt").input_ids outputs = model.generate(input_ids) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sentence = <span class="hljs-string">&quot;Paris is one of the densest populated areas in Europe.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(sentence, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Paris ist eines der 
dichtesten besiedelten Gebiete Europas.&#x27;</span>]`}}),bt=new be({props:{name:"greedy_search",anchor:"transformers.generation_utils.GenerationMixin.greedy_search",parameters:[{name:"input_ids",val:": LongTensor"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = False"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L1489",parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.greedy_search.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific keyword arguments will be forwarded to the <code>forward</code> function of the model. If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],returnDescription:` <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput" >GreedySearchDecoderOnlyOutput</a>, <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.GreedySearchEncoderDecoderOutput" >GreedySearchEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput" >GreedySearchDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.GreedySearchEncoderDecoderOutput" >GreedySearchEncoderDecoderOutput</a> if 
<code>model.config.is_encoder_decoder=True</code>.</p> `}}),kt=new ye({props:{code:`from transformers import ( AutoTokenizer, AutoModelForCausalLM, LogitsProcessorList, MinLengthLogitsProcessor, StoppingCriteriaList, MaxLengthCriteria, ) tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") # set pad_token_id to eos_token_id because GPT2 does not have a EOS token model.config.pad_token_id = model.config.eos_token_id input_prompt = "It might be possible to" input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id), ] ) stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) outputs = model.greedy_search( input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria ) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForCausalLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> StoppingCriteriaList, <span class="hljs-meta">... </span> MaxLengthCriteria, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set pad_token_id to eos_token_id because GPT2 does not have a EOS token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>input_prompt = <span class="hljs-string">&quot;It might be possible to&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">10</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=<span class="hljs-number">20</span>)]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.greedy_search( <span class="hljs-meta">... </span> input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&quot;It might be possible to get a better understanding of the nature of the problem, but it&#x27;s not&quot;</span>]`}}),vt=new be({props:{name:"sample",anchor:"transformers.generation_utils.GenerationMixin.sample",parameters:[{name:"input_ids",val:": LongTensor"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"logits_warper",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = False"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L1721",parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.sample.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.sample.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a 
href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.sample.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.sample.logits_warper",description:`<strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.`,name:"logits_warper"},{anchor:"transformers.generation_utils.GenerationMixin.sample.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.sample.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.sample.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.sample.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.sample.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.sample.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.sample.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.sample.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],returnDescription:` <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.SampleDecoderOnlyOutput" >SampleDecoderOnlyOutput</a>, <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.SampleEncoderDecoderOutput" >SampleEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.SampleDecoderOnlyOutput" >SampleDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.SampleEncoderDecoderOutput" >SampleEncoderDecoderOutput</a> if <code>model.config.is_encoder_decoder=True</code>.</p> `}}),jt=new ye({props:{code:`from transformers 
import ( AutoTokenizer, AutoModelForCausalLM, LogitsProcessorList, MinLengthLogitsProcessor, TopKLogitsWarper, TemperatureLogitsWarper, StoppingCriteriaList, MaxLengthCriteria, ) import torch tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") # set pad_token_id to eos_token_id because GPT2 does not have a EOS token model.config.pad_token_id = model.config.eos_token_id input_prompt = "Today is a beautiful day, and" input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id), ] ) # instantiate logits processors logits_warper = LogitsProcessorList( [ TopKLogitsWarper(50), TemperatureLogitsWarper(0.7), ] ) stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) torch.manual_seed(0) outputs = model.sample( input_ids, logits_processor=logits_processor, logits_warper=logits_warper, stopping_criteria=stopping_criteria, ) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForCausalLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> TopKLogitsWarper, <span class="hljs-meta">... </span> TemperatureLogitsWarper, <span class="hljs-meta">... </span> StoppingCriteriaList, <span class="hljs-meta">... </span> MaxLengthCriteria, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set pad_token_id to eos_token_id because GPT2 does not have a EOS token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>input_prompt = <span class="hljs-string">&quot;Today is a beautiful day, and&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">15</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_warper = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> TopKLogitsWarper(<span class="hljs-number">50</span>), <span class="hljs-meta">... </span> TemperatureLogitsWarper(<span class="hljs-number">0.7</span>), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=<span class="hljs-number">20</span>)]) <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.sample( <span class="hljs-meta">... </span> input_ids, <span class="hljs-meta">... </span> logits_processor=logits_processor, <span class="hljs-meta">... </span> logits_warper=logits_warper, <span class="hljs-meta">... </span> stopping_criteria=stopping_criteria, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today is a beautiful day, and a wonderful day.\\n\\nI was lucky enough to meet the&#x27;</span>]`}}),Mt=new be({props:{name:"beam_search",anchor:"transformers.generation_utils.GenerationMixin.beam_search",parameters:[{name:"input_ids",val:": LongTensor"},{name:"beam_scorer",val:": BeamScorer"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = 
False"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L1977",parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.beam_search.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.beam_scorer",description:`<strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; An derived instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.`,name:"beam_scorer"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.beam_search.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],returnDescription:` <p><code>generation_utilsBeamSearchDecoderOnlyOutput</code>, <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> if <code>model.config.is_encoder_decoder=True</code>.</p> `}}),wt=new ye({props:{code:`from transformers import ( AutoTokenizer, AutoModelForSeq2SeqLM, LogitsProcessorList, MinLengthLogitsProcessor, BeamSearchScorer, ) import torch tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" 
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> BeamSearchScorer, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]`}}),Tt=new be({props:{name:"beam_sample",anchor:"transformers.generation_utils.GenerationMixin.beam_sample",parameters:[{name:"input_ids",val:": LongTensor"},{name:"beam_scorer",val:": BeamScorer"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"logits_warper",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": typing.Optional[bool] = 
None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = False"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L2289",parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.beam_scorer",description:`<strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; A derived instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.`,name:"beam_scorer"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.logits_warper",description:`<strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.`,name:"logits_warper"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.beam_sample.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],returnDescription:` <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSampleDecoderOnlyOutput" >BeamSampleDecoderOnlyOutput</a>, <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSampleEncoderDecoderOutput" >BeamSampleEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSampleDecoderOnlyOutput" >BeamSampleDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSampleEncoderDecoderOutput" >BeamSampleEncoderDecoderOutput</a> if <code>model.config.is_encoder_decoder=True</code>.</p> 
`}}),Ot=new ye({props:{code:`from transformers import ( AutoTokenizer, AutoModelForSeq2SeqLM, LogitsProcessorList, MinLengthLogitsProcessor, TopKLogitsWarper, TemperatureLogitsWarper, BeamSearchScorer, ) import torch tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, max_length=model.config.max_length, num_beams=num_beams, device=model.device, ) # instantiate logits processors logits_processor = LogitsProcessorList( [MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)] ) # instantiate logits processors logits_warper = LogitsProcessorList( [ TopKLogitsWarper(50), TemperatureLogitsWarper(0.7), ] ) outputs = model.beam_sample( input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs ) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> TopKLogitsWarper, <span class="hljs-meta">... 
</span> TemperatureLogitsWarper, <span class="hljs-meta">... </span> BeamSearchScorer, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> max_length=model.config.max_length, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id)] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_warper = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> TopKLogitsWarper(<span class="hljs-number">50</span>), <span class="hljs-meta">... </span> TemperatureLogitsWarper(<span class="hljs-number">0.7</span>), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.beam_sample( <span class="hljs-meta">... </span> input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]`}}),qt=new be({props:{name:"group_beam_search",anchor:"transformers.generation_utils.GenerationMixin.group_beam_search",parameters:[{name:"input_ids",val:": LongTensor"},{name:"beam_scorer",val:": BeamScorer"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = False"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L2611",parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.beam_scorer",description:`<strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; An derived instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. 
For more information, the documentation of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.`,name:"beam_scorer"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.group_beam_search.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3)</p> <p>model_kwargs &#x2014; Additional model specific kwargs that will be forwarded to the <code>forward</code> function of the model. If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],returnDescription:` <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a>, <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a> if <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a 
href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> if <code>model.config.is_encoder_decoder=True</code>.</p> `}}),St=new ye({props:{code:`from transformers import ( AutoTokenizer, AutoModelForSeq2SeqLM, LogitsProcessorList, MinLengthLogitsProcessor, HammingDiversityLogitsProcessor, BeamSearchScorer, ) import torch tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run diverse beam search using 6 beams num_beams = 6 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, max_length=model.config.max_length, num_beams=num_beams, device=model.device, num_beam_groups=3, ) # instantiate logits processors logits_processor = LogitsProcessorList( [ HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3), MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.group_beam_search( input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs ) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... 
</span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> HammingDiversityLogitsProcessor, <span class="hljs-meta">... </span> BeamSearchScorer, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run diverse beam search using 6 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">6</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... 
</span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> max_length=model.config.max_length, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span> num_beam_groups=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> HammingDiversityLogitsProcessor(<span class="hljs-number">5.5</span>, num_beams=<span class="hljs-number">6</span>, num_beam_groups=<span class="hljs-number">3</span>), <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.group_beam_search( <span class="hljs-meta">... </span> input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]`}}),$t=new be({props:{name:"constrained_beam_search",anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search",parameters:[{name:"input_ids",val:": LongTensor"},{name:"constrained_beam_scorer",val:": ConstrainedBeamSearchScorer"},{name:"logits_processor",val:": typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None"},{name:"stopping_criteria",val:": typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"output_attentions",val:": typing.Optional[bool] = None"},{name:"output_hidden_states",val:": typing.Optional[bool] = None"},{name:"output_scores",val:": typing.Optional[bool] = None"},{name:"return_dict_in_generate",val:": typing.Optional[bool] = None"},{name:"synced_gpus",val:": typing.Optional[bool] = None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L2976",parametersDescription:[{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.constrained_beam_scorer",description:`<strong>constrained_beam_scorer</strong> (<code>ConstrainedBeamSearchScorer</code>) &#x2014; A derived instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines 
how beam hypotheses are constructed, stored and sorted during generation, while satisfying a list of positive constraints. For more information, the documentation of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.ConstrainedBeamSearchScorer">ConstrainedBeamSearchScorer</a> should be read.`,name:"constrained_beam_scorer"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_processor",description:`<strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.`,name:"logits_processor"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.stopping_criteria",description:`<strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.`,name:"stopping_criteria"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_warper",description:`<strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.`,name:"logits_warper"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_utils.GenerationMixin.constrained_beam_search.synced_gpus",description:`<strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.`,name:"synced_gpus"}],returnDescription:` <p><code>generation_utilsBeamSearchDecoderOnlyOutput</code>, <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> or <code>torch.LongTensor</code>: A <code>torch.LongTensor</code> containing the generated tokens (default behaviour) or a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a> if <code>model.config.is_encoder_decoder=False</code> and <code>return_dict_in_generate=True</code> or a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a> if <code>model.config.is_encoder_decoder=True</code>.</p> `}}),At=new ye({props:{code:`from transformers import ( AutoTokenizer, AutoModelForSeq2SeqLM, LogitsProcessorList, MinLengthLogitsProcessor, ConstrainedBeamSearchScorer, PhrasalConstraint, ) import torch tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" 
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } constraint_str = "sind" constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # slice to remove eos token constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] # instantiate beam scorer beam_scorer = ConstrainedBeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.constrained_beam_search( input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... </span> ConstrainedBeamSearchScorer, <span class="hljs-meta">... </span> PhrasalConstraint, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>constraint_str = <span class="hljs-string">&quot;sind&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>constraint_token_ids = tokenizer.encode(constraint_str)[:-<span class="hljs-number">1</span>] <span class="hljs-comment"># slice to remove eos token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = ConstrainedBeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, num_beams=num_beams, device=model.device, constraints=constraints <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.constrained_beam_search( <span class="hljs-meta">... </span> input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt sind Sie?&#x27;</span>]`}}),zt=new Cs({}),Pt=new be({props:{name:"class transformers.generation_tf_utils.TFGenerationMixin",anchor:"transformers.generation_tf_utils.TFGenerationMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_utils.py#L342"}}),Dt=new be({props:{name:"generate",anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate",parameters:[{name:"input_ids",val:" = None"},{name:"max_length",val:" = None"},{name:"min_length",val:" = None"},{name:"do_sample",val:" = None"},{name:"early_stopping",val:" = None"},{name:"num_beams",val:" = None"},{name:"temperature",val:" = None"},{name:"top_k",val:" = None"},{name:"top_p",val:" = None"},{name:"repetition_penalty",val:" = None"},{name:"bad_words_ids",val:" = None"},{name:"bos_token_id",val:" = None"},{name:"pad_token_id",val:" = None"},{name:"eos_token_id",val:" = None"},{name:"length_penalty",val:" = None"},{name:"no_repeat_ngram_size",val:" = None"},{name:"num_return_sequences",val:" = None"},{name:"attention_mask",val:" = None"},{name:"decoder_start_token_id",val:" = None"},{name:"use_cache",val:" = None"},{name:"output_scores",val:" = None"},{name:"output_attentions",val:" = None"},{name:"output_hidden_states",val:" = None"},{name:"return_dict_in_generate",val:" = None"},{name:"forced_bos_token_id",val:" = None"},{name:"forced_eos_token_id",val:" = None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_utils.py#L362",parametersDescription:[{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.input_ids",description:"<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, `(batch_size, sequence_length, 
&#x2014;",name:"input_ids"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.feature_dim)`",description:`<strong>feature_dim)\`</strong> or <code>(batch_size, num_channels, height, width)</code>, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation or as model inputs to the encoder. If <code>None</code> the method initializes it with <code>bos_token_id</code> and a batch size of 1. For decoder-only models <code>inputs</code> should of in the format of <code>input_ids</code>. For encoder-decoder models <em>inputs</em> can represent any of <code>input_ids</code>, <code>input_values</code>, <code>input_features</code>, or <code>pixel_values</code>.`,name:"feature_dim)`"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.min_length",description:`<strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of the sequence to be generated.`,name:"min_length"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.do_sample",description:`<strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sampling ; use greedy decoding otherwise.`,name:"do_sample"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.early_stopping",description:`<strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.`,name:"early_stopping"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.num_beams",description:`<strong>num_beams</strong> 
(<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.temperature",description:`<strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The value used to module the next token probabilities.`,name:"temperature"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.`,name:"top_k"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.top_p",description:`<strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.repetition_penalty",description:`<strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. 
See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.`,name:"repetition_penalty"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.`,name:"bos_token_id"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length. 1.0 means no penalty.</p> <p>Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.`,name:"length_penalty"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.no_repeat_ngram_size",description:`<strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.`,name:"no_repeat_ngram_size"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.bad_words_ids(List[int],",description:`<strong>bad_words_ids(<code>List[int]</code>,</strong> <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated. 
In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.`,name:"bad_words_ids(List[int],"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.num_return_sequences(int,",description:`<strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch.`,name:"num_return_sequences(int,"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.attention_mask",description:`<strong>attention_mask</strong> (<code>tf.Tensor</code> of <code>dtype=tf.int32</code> and shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values are in <code>[0, 1]</code>, 1 for tokens that are not masked, and 0 for masked tokens.</p> <p>If not provided, will default to a tensor the same shape as <code>input_ids</code> that masks the pad token.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"attention_mask"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.decoder_start_token_id",description:`<strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token. use_cache &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.`,name:"decoder_start_token_id"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.output_attentions",description:`<strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more details.`,name:"output_attentions"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.output_hidden_states",description:`<strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. See <code>hidden_states</code> under returned tensors for more details.`,name:"output_hidden_states"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.output_scores",description:`<strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. See <code>scores</code> under returned tensors for more details.`,name:"output_scores"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.return_dict_in_generate",description:`<strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict_in_generate"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.forced_bos_token_id",description:`<strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. 
Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.`,name:"forced_bos_token_id"},{anchor:"transformers.generation_tf_utils.TFGenerationMixin.generate.forced_eos_token_id",description:`<strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. model_specific_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model.`,name:"forced_eos_token_id"}],returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> or <code>tf.Tensor</code></p> `}}),Ht=new ye({props:{code:`tokenizer = AutoTokenizer.from_pretrained("distilgpt2") # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained( "distilgpt2" ) # Download model and configuration from huggingface.co and cache. outputs = model.generate(max_length=40) # do greedy decoding print(f"Generated: {tokenizer.decode(outputs[0], skip_special_tokens=True)}") tokenizer = AutoTokenizer.from_pretrained("openai-gpt") # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained( "openai-gpt" ) # Download model and configuration from huggingface.co and cache. 
input_context = "The dog" input_ids = tokenizer.encode(input_context, return_tensors="tf") # encode input context outputs = model.generate( input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5 ) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog' for i in range(3): # 3 output sequences were generated print(f"Generated {i}: {tokenizer.decode(outputs[i], skip_special_tokens=True)}") tokenizer = AutoTokenizer.from_pretrained("distilgpt2") # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained( "distilgpt2" ) # Download model and configuration from huggingface.co and cache. input_context = "The dog" input_ids = tokenizer.encode(input_context, return_tensors="tf") # encode input context outputs = model.generate( input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True ) # generate 3 candidates using sampling for i in range(3): # 3 output sequences were generated print(f"Generated {i}: {tokenizer.decode(outputs[i], skip_special_tokens=True)}") tokenizer = AutoTokenizer.from_pretrained("ctrl") # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained( "ctrl" ) # Download model and configuration from huggingface.co and cache. input_context = "Legal My neighbor is" # "Legal" is one of the control codes for ctrl input_ids = tokenizer.encode(input_context, return_tensors="tf") # encode input context outputs = model.generate( input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2 ) # generate sequences print(f"Generated: {tokenizer.decode(outputs[0], skip_special_tokens=True)}") tokenizer = AutoTokenizer.from_pretrained("gpt2") # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained( "gpt2" ) # Download model and configuration from huggingface.co and cache. 
input_context = "My cute dog" bad_words_ids = [ tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ["idiot", "stupid", "shut up"] ] input_ids = tokenizer.encode(input_context, return_tensors="tf") # encode input context outputs = model.generate( input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids ) # generate sequences without allowing bad_words to be generated`,highlighted:`tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;distilgpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> outputs = model.generate(max_length=<span class="hljs-number">40</span>) <span class="hljs-comment"># do greedy decoding</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated: <span class="hljs-subst">{tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;openai-gpt&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;openai-gpt&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;The dog&quot;</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, num_beams=<span class="hljs-number">5</span>, num_return_sequences=<span class="hljs-number">3</span>, temperature=<span class="hljs-number">1.5</span> ) <span class="hljs-comment"># generate 
3 independent sequences using beam search decoding (5 beams) with sampling from initial context &#x27;The dog&#x27;</span> <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">3</span>): <span class="hljs-comment"># 3 output sequences were generated</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated <span class="hljs-subst">{i}</span>: <span class="hljs-subst">{tokenizer.decode(outputs[i], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;distilgpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;The dog&quot;</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">40</span>, temperature=<span class="hljs-number">0.7</span>, num_return_sequences=<span class="hljs-number">3</span>, do_sample=<span class="hljs-literal">True</span> ) <span class="hljs-comment"># generate 3 candidates using sampling</span> <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">3</span>): <span class="hljs-comment"># 3 output sequences were generated</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated <span class="hljs-subst">{i}</span>: <span class="hljs-subst">{tokenizer.decode(outputs[i], skip_special_tokens=<span 
class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;ctrl&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;ctrl&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;Legal My neighbor is&quot;</span> <span class="hljs-comment"># &quot;Legal&quot; is one of the control codes for ctrl</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">50</span>, temperature=<span class="hljs-number">0.7</span>, repetition_penalty=<span class="hljs-number">1.2</span> ) <span class="hljs-comment"># generate sequences</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated: <span class="hljs-subst">{tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;gpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;My cute dog&quot;</span> bad_words_ids = [ tokenizer.encode(bad_word, add_prefix_space=<span class="hljs-literal">True</span>) <span class="hljs-keyword">for</span> bad_word <span class="hljs-keyword">in</span> [<span class="hljs-string">&quot;idiot&quot;</span>, <span class="hljs-string">&quot;stupid&quot;</span>, <span 
class="hljs-string">&quot;shut up&quot;</span>] ] input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">100</span>, do_sample=<span class="hljs-literal">True</span>, bad_words_ids=bad_words_ids ) <span class="hljs-comment"># generate sequences without allowing bad_words to be generated</span>`}}),Rt=new Cs({}),Ut=new be({props:{name:"class transformers.generation_flax_utils.FlaxGenerationMixin",anchor:"transformers.generation_flax_utils.FlaxGenerationMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_utils.py#L119"}}),Zt=new be({props:{name:"generate",anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate",parameters:[{name:"input_ids",val:": ndarray"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"bos_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"},{name:"decoder_start_token_id",val:": typing.Optional[int] = None"},{name:"do_sample",val:": typing.Optional[bool] = None"},{name:"prng_key",val:": typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None"},{name:"top_k",val:": typing.Optional[int] = None"},{name:"top_p",val:": typing.Optional[float] = None"},{name:"temperature",val:": typing.Optional[float] = None"},{name:"num_beams",val:": typing.Optional[int] = None"},{name:"no_repeat_ngram_size",val:": typing.Optional[int] = None"},{name:"min_length",val:": typing.Optional[int] = None"},{name:"forced_bos_token_id",val:": typing.Optional[int] = None"},{name:"forced_eos_token_id",val:": typing.Optional[int] = None"},{name:"length_penalty",val:": typing.Optional[float] = None"},{name:"early_stopping",val:": typing.Optional[bool] = None"},{name:"trace",val:": bool = 
True"},{name:"params",val:": typing.Union[typing.Dict[str, jax._src.numpy.lax_numpy.ndarray], NoneType] = None"},{name:"**model_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_utils.py#L163",parametersDescription:[{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.`,name:"input_ids"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.do_sample",description:`<strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sampling ; use greedy decoding otherwise.`,name:"do_sample"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.temperature",description:`<strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The value used to module the next token probabilities.`,name:"temperature"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.`,name:"top_k"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_p",description:`<strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for 
generation.`,name:"top_p"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.`,name:"bos_token_id"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 1 means no beam search.`,name:"num_beams"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.decoder_start_token_id",description:`<strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.`,name:"decoder_start_token_id"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.trace",description:`<strong>trace</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to trace generation. Setting <code>trace=False</code> should only be used for debugging and will lead to a considerably slower runtime.`,name:"trace"},{anchor:"transformers.generation_flax_utils.FlaxGenerationMixin.generate.params",description:`<strong>params</strong> (<code>Dict[str, jnp.ndarray]</code>, <em>optional</em>) &#x2014; Optionally the model parameters can be passed. Can be useful for parallelized generation. 
model<em>kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder</em>*. Also accepts <code>encoder_outputs</code> to skip encoder part.`,name:"params"}],returnDescription:` <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a>.</p> `}}),Ye=new Rm({props:{warning:"&lcub;true}",$$slots:{default:[Jm]},$$scope:{ctx:it}}}),Qt=new ye({props:{code:`from transformers import AutoTokenizer, FlaxAutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("distilgpt2") model = FlaxAutoModelForCausalLM.from_pretrained("distilgpt2") input_context = "The dog" # encode input context input_ids = tokenizer(input_context, return_tensors="np").input_ids # generate candidates using sampling outputs = model.generate(input_ids=input_ids, max_length=20, top_k=30, do_sample=True) tokenizer.batch_decode(outputs, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, FlaxAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_context = <span class="hljs-string">&quot;The dog&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># encode input context</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_context, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-comment"># generate candidates using sampling</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids=input_ids, max_length=<span class="hljs-number">20</span>, top_k=<span class="hljs-number">30</span>, do_sample=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>)`}}),{c(){p=s("meta"),P=l(),j=s("h1"),T=s("a"),I=s("span"),m(E.$$.fragment),xe=l(),W=s("span"),G=t("Generation"),B=l(),v=s("p"),$=t("The methods for auto-regressive text generation, namely "),b=s("a"),Me=t("generate()"),Le=t(" (for the PyTorch models), "),H=s("a"),Is=t("generate()"),Ws=t(" (for the TensorFlow models) and "),en=s("a"),Bs=t("generate()"),Hs=t(" (for the Flax/JAX models), are implemented in "),tn=s("a"),Rs=t("GenerationMixin"),Us=t(", "),nn=s("a"),Vs=t("TFGenerationMixin"),Ks=t(" and "),on=s("a"),Zs=t("FlaxGenerationMixin"),Xs=t(" respectively."),_s=l(),S=s("p"),Js=t("The "),On=s("code"),Qs=t("GenerationMixin"),Ys=t(" classes are inherited by the corresponding base model classes, "),qn=s("em"),ea=t("e.g."),ta=l(),sn=s("a"),na=t("PreTrainedModel"),oa=t(", "),an=s("a"),sa=t("TFPreTrainedModel"),aa=t(", and "),rn=s("a"),ra=t("FlaxPreTrainedModel"),ia=t(` respectively, therefore exposing all methods for auto-regressive text generation to every model class.`),us=l(),we=s("h2"),Ze=s("a"),Gn=s("span"),m(lt.$$.fragment),la=l(),Sn=s("span"),da=t("GenerationMixn"),hs=l(),k=s("div"),m(dt.$$.fragment),ca=l(),ct=s("p"),pa=t("A class containing all functions for auto-regressive text generation, to be used as a mixin in "),ln=s("a"),ma=t("PreTrainedModel"),ga=t("."),_a=l(),pt=s("p"),ua=t("The class exposes "),dn=s("a"),ha=t("generate()"),fa=t(", which can be used for:"),ba=l(),F=s("ul"),R=s("li"),$n=s("em"),xa=t("greedy decoding"),ka=t(" by calling "),cn=s("a"),va=t("greedy_search()"),ya=t(" if 
"),Fn=s("code"),ja=t("num_beams=1"),Ma=t(` and `),An=s("code"),La=t("do_sample=False"),wa=t("."),Ta=l(),U=s("li"),zn=s("em"),Ea=t("multinomial sampling"),Oa=t(" by calling "),pn=s("a"),qa=t("sample()"),Ga=t(" if "),Pn=s("code"),Sa=t("num_beams=1"),$a=t(` and `),Nn=s("code"),Fa=t("do_sample=True"),Aa=t("."),za=l(),V=s("li"),Dn=s("em"),Pa=t("beam-search decoding"),Na=t(" by calling "),mn=s("a"),Da=t("beam_search()"),Ca=t(" if "),Cn=s("code"),Ia=t("num_beams>1"),Wa=t(` and `),In=s("code"),Ba=t("do_sample=False"),Ha=t("."),Ra=l(),K=s("li"),Wn=s("em"),Ua=t("beam-search multinomial sampling"),Va=t(" by calling "),gn=s("a"),Ka=t("beam_sample()"),Za=t(` if `),Bn=s("code"),Xa=t("num_beams>1"),Ja=t(" and "),Hn=s("code"),Qa=t("do_sample=True"),Ya=t("."),er=l(),Z=s("li"),Rn=s("em"),tr=t("diverse beam-search decoding"),nr=t(" by calling "),_n=s("a"),or=t("group_beam_search()"),sr=t(`, if `),Un=s("code"),ar=t("num_beams>1"),rr=t(" and "),Vn=s("code"),ir=t("num_beam_groups>1"),lr=t("."),dr=l(),X=s("li"),Kn=s("em"),cr=t("constrained beam-search decoding"),pr=t(" by calling "),un=s("a"),mr=t("constrained_beam_search()"),gr=t(`, if `),Zn=s("code"),_r=t("constraints!=None"),ur=t(" or "),Xn=s("code"),hr=t("force_words_ids!=None"),fr=t("."),br=l(),x=s("div"),m(mt.$$.fragment),xr=l(),Jn=s("p"),kr=t(`Generates sequences of token ids for models with a language modeling head. 
The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:`),vr=l(),A=s("ul"),J=s("li"),Qn=s("em"),yr=t("greedy decoding"),jr=t(" by calling "),hn=s("a"),Mr=t("greedy_search()"),Lr=t(" if "),Yn=s("code"),wr=t("num_beams=1"),Tr=t(` and `),eo=s("code"),Er=t("do_sample=False"),Or=t("."),qr=l(),Q=s("li"),to=s("em"),Gr=t("multinomial sampling"),Sr=t(" by calling "),fn=s("a"),$r=t("sample()"),Fr=t(" if "),no=s("code"),Ar=t("num_beams=1"),zr=t(` and `),oo=s("code"),Pr=t("do_sample=True"),Nr=t("."),Dr=l(),Y=s("li"),so=s("em"),Cr=t("beam-search decoding"),Ir=t(" by calling "),bn=s("a"),Wr=t("beam_search()"),Br=t(" if "),ao=s("code"),Hr=t("num_beams>1"),Rr=t(` and `),ro=s("code"),Ur=t("do_sample=False"),Vr=t("."),Kr=l(),ee=s("li"),io=s("em"),Zr=t("beam-search multinomial sampling"),Xr=t(" by calling "),xn=s("a"),Jr=t("beam_sample()"),Qr=t(` if `),lo=s("code"),Yr=t("num_beams>1"),ei=t(" and "),co=s("code"),ti=t("do_sample=True"),ni=t("."),oi=l(),te=s("li"),po=s("em"),si=t("diverse beam-search decoding"),ai=t(" by calling "),kn=s("a"),ri=t("group_beam_search()"),ii=t(`, if `),mo=s("code"),li=t("num_beams>1"),di=t(" and "),go=s("code"),ci=t("num_beam_groups>1"),pi=t("."),mi=l(),ne=s("li"),_o=s("em"),gi=t("constrained beam-search decoding"),_i=t(` by calling `),vn=s("a"),ui=t("constrained_beam_search()"),hi=t(", if "),uo=s("code"),fi=t("constraints!=None"),bi=t(` or `),ho=s("code"),xi=t("force_words_ids!=None"),ki=t("."),vi=l(),m(Xe.$$.fragment),yi=l(),gt=s("p"),ji=t("Most of these parameters are explained in more detail in "),_t=s("a"),Mi=t(`this blog post`),Li=t("."),wi=l(),fo=s("p"),Ti=t("Examples:"),Ei=l(),bo=s("p"),Oi=t("Greedy Decoding:"),qi=l(),m(ut.$$.fragment),Gi=l(),xo=s("p"),Si=t("Multinomial Sampling:"),$i=l(),m(ht.$$.fragment),Fi=l(),ko=s("p"),Ai=t("Beam-search decoding:"),zi=l(),m(ft.$$.fragment),Pi=l(),oe=s("div"),m(bt.$$.fragment),Ni=l(),xt=s("p"),Di=t("Generates sequences of token ids for 
models with a language modeling head using "),vo=s("strong"),Ci=t("greedy decoding"),Ii=t(` and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.`),Wi=l(),yo=s("p"),Bi=t("Examples:"),Hi=l(),m(kt.$$.fragment),Ri=l(),se=s("div"),m(vt.$$.fragment),Ui=l(),yt=s("p"),Vi=t("Generates sequences of token ids for models with a language modeling head using "),jo=s("strong"),Ki=t("multinomial sampling"),Zi=t(` and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.`),Xi=l(),Mo=s("p"),Ji=t("Examples:"),Qi=l(),m(jt.$$.fragment),Yi=l(),ae=s("div"),m(Mt.$$.fragment),el=l(),Lt=s("p"),tl=t("Generates sequences of token ids for models with a language modeling head using "),Lo=s("strong"),nl=t("beam search decoding"),ol=t(` and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.`),sl=l(),wo=s("p"),al=t("Examples:"),rl=l(),m(wt.$$.fragment),il=l(),re=s("div"),m(Tt.$$.fragment),ll=l(),Et=s("p"),dl=t("Generates sequences of token ids for models with a language modeling head using "),To=s("strong"),cl=t(`beam search multinomial sampling`),pl=t(" and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models."),ml=l(),Eo=s("p"),gl=t("Examples:"),_l=l(),m(Ot.$$.fragment),ul=l(),ie=s("div"),m(qt.$$.fragment),hl=l(),Gt=s("p"),fl=t("Generates sequences of token ids for models with a language modeling head using "),Oo=s("strong"),bl=t(`diverse beam search decoding`),xl=t(" and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models."),kl=l(),qo=s("p"),vl=t("Examples:"),yl=l(),m(St.$$.fragment),jl=l(),le=s("div"),m($t.$$.fragment),Ml=l(),Ft=s("p"),Ll=t("Generates sequences of token ids for models with a language modeling head using "),Go=s("strong"),wl=t(`constrained beam search decoding`),Tl=t(" and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text 
models."),El=l(),So=s("p"),Ol=t("Examples:"),ql=l(),m(At.$$.fragment),fs=l(),Te=s("h2"),Je=s("a"),$o=s("span"),m(zt.$$.fragment),Gl=l(),Fo=s("span"),Sl=t("TFGenerationMixn"),bs=l(),ke=s("div"),m(Pt.$$.fragment),$l=l(),Nt=s("p"),Fl=t("A class containing all of the functions supporting generation, to be used as a mixin in "),yn=s("a"),Al=t("TFPreTrainedModel"),zl=t("."),Pl=l(),O=s("div"),m(Dt.$$.fragment),Nl=l(),Ao=s("p"),Dl=t(`Generates sequences for models with a language modeling head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.`),Cl=l(),Ct=s("p"),Il=t("Adapted in part from "),It=s("a"),Wl=t(`Facebook\u2019s XLM beam search code`),Bl=t("."),Hl=l(),ve=s("p"),Rl=t("Apart from "),zo=s("code"),Ul=t("input_ids"),Vl=t(" and "),Po=s("code"),Kl=t("attention_mask"),Zl=t(`, all the arguments below will default to the value of the attribute of the same name inside the `),jn=s("a"),Xl=t("PretrainedConfig"),Jl=t(` of the model. 
The default values indicated are the default values of those config.`),Ql=l(),Wt=s("p"),Yl=t("Most of these parameters are explained in more detail in "),Bt=s("a"),ed=t(`this blog post`),td=t("."),nd=l(),No=s("p"),od=t("Examples:"),sd=l(),m(Ht.$$.fragment),xs=l(),Ee=s("h2"),Qe=s("a"),Do=s("span"),m(Rt.$$.fragment),ad=l(),Co=s("span"),rd=t("FlaxGenerationMixn"),ks=l(),z=s("div"),m(Ut.$$.fragment),id=l(),Vt=s("p"),ld=t(`A class containing all functions for auto-regressive text generation, to be used as a mixin in `),Mn=s("a"),dd=t("FlaxPreTrainedModel"),cd=t("."),pd=l(),Kt=s("p"),md=t("The class exposes "),Ln=s("a"),gd=t("generate()"),_d=t(", which can be used for:"),ud=l(),Oe=s("ul"),de=s("li"),Io=s("em"),hd=t("greedy decoding"),fd=t(" by calling "),Wo=s("code"),bd=t("_greedy_search()"),xd=t(`if `),Bo=s("code"),kd=t("num_beams=1"),vd=t(" and "),Ho=s("code"),yd=t("do_sample=False"),jd=t("."),Md=l(),ce=s("li"),Ro=s("em"),Ld=t("multinomial sampling"),wd=t(" by calling "),Uo=s("code"),Td=t("_sample()"),Ed=t("if "),Vo=s("code"),Od=t("num_beams=1"),qd=t(` and `),Ko=s("code"),Gd=t("do_sample=True"),Sd=t("."),$d=l(),pe=s("li"),Zo=s("em"),Fd=t("beam-search decoding"),Ad=t(" by calling "),Xo=s("code"),zd=t("_beam_search"),Pd=t(" if "),Jo=s("code"),Nd=t("num_beams>1"),Dd=t(` and `),Qo=s("code"),Cd=t("do_sample=False"),Id=t("."),Wd=l(),q=s("div"),m(Zt.$$.fragment),Bd=l(),Yo=s("p"),Hd=t(`Generates sequences of token ids for models with a language modeling head. 
The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:`),Rd=l(),qe=s("ul"),me=s("li"),es=s("em"),Ud=t("greedy decoding"),Vd=t(" by calling "),ts=s("code"),Kd=t("_greedy_search()"),Zd=t(`if `),ns=s("code"),Xd=t("num_beams=1"),Jd=t(" and "),os=s("code"),Qd=t("do_sample=False"),Yd=t("."),ec=l(),ge=s("li"),ss=s("em"),tc=t("multinomial sampling"),nc=t(" by calling "),as=s("code"),oc=t("_sample()"),sc=t("if "),rs=s("code"),ac=t("num_beams=1"),rc=t(` and `),is=s("code"),ic=t("do_sample=True"),lc=t("."),dc=l(),_e=s("li"),ls=s("em"),cc=t("beam-search decoding"),pc=t(" by calling "),ds=s("code"),mc=t("_beam_search"),gc=t(" if "),cs=s("code"),_c=t("num_beams>1"),uc=t(` and `),ps=s("code"),hc=t("do_sample=False"),fc=t("."),bc=l(),m(Ye.$$.fragment),xc=l(),Xt=s("p"),kc=t("Most of these parameters are explained in more detail in "),Jt=s("a"),vc=t(`this blog post`),yc=t("."),jc=l(),ms=s("p"),Mc=t("Examples:"),Lc=l(),m(Qt.$$.fragment),this.h()},l(i){const y=Zm('[data-svelte="svelte-1phssyn"]',document.head);p=a(y,"META",{name:!0,content:!0}),y.forEach(o),P=d(i),j=a(i,"H1",{class:!0});var Yt=r(j);T=a(Yt,"A",{id:!0,class:!0,href:!0});var gs=r(T);I=a(gs,"SPAN",{});var wc=r(I);g(E.$$.fragment,wc),wc.forEach(o),gs.forEach(o),xe=d(Yt),W=a(Yt,"SPAN",{});var Tc=r(W);G=n(Tc,"Generation"),Tc.forEach(o),Yt.forEach(o),B=d(i),v=a(i,"P",{});var N=r(v);$=n(N,"The methods for auto-regressive text generation, namely "),b=a(N,"A",{href:!0});var Ec=r(b);Me=n(Ec,"generate()"),Ec.forEach(o),Le=n(N," (for the PyTorch models), "),H=a(N,"A",{href:!0});var Oc=r(H);Is=n(Oc,"generate()"),Oc.forEach(o),Ws=n(N," (for the TensorFlow models) and "),en=a(N,"A",{href:!0});var qc=r(en);Bs=n(qc,"generate()"),qc.forEach(o),Hs=n(N," (for the Flax/JAX models), are implemented in "),tn=a(N,"A",{href:!0});var Gc=r(tn);Rs=n(Gc,"GenerationMixin"),Gc.forEach(o),Us=n(N,", "),nn=a(N,"A",{href:!0});var 
Sc=r(nn);Vs=n(Sc,"TFGenerationMixin"),Sc.forEach(o),Ks=n(N," and "),on=a(N,"A",{href:!0});var $c=r(on);Zs=n($c,"FlaxGenerationMixin"),$c.forEach(o),Xs=n(N," respectively."),N.forEach(o),_s=d(i),S=a(i,"P",{});var ue=r(S);Js=n(ue,"The "),On=a(ue,"CODE",{});var Fc=r(On);Qs=n(Fc,"GenerationMixin"),Fc.forEach(o),Ys=n(ue," classes are inherited by the corresponding base model classes, "),qn=a(ue,"EM",{});var Ac=r(qn);ea=n(Ac,"e.g."),Ac.forEach(o),ta=d(ue),sn=a(ue,"A",{href:!0});var zc=r(sn);na=n(zc,"PreTrainedModel"),zc.forEach(o),oa=n(ue,", "),an=a(ue,"A",{href:!0});var Pc=r(an);sa=n(Pc,"TFPreTrainedModel"),Pc.forEach(o),aa=n(ue,", and "),rn=a(ue,"A",{href:!0});var Nc=r(rn);ra=n(Nc,"FlaxPreTrainedModel"),Nc.forEach(o),ia=n(ue,` respectively, therefore exposing all methods for auto-regressive text generation to every model class.`),ue.forEach(o),us=d(i),we=a(i,"H2",{class:!0});var ys=r(we);Ze=a(ys,"A",{id:!0,class:!0,href:!0});var Dc=r(Ze);Gn=a(Dc,"SPAN",{});var Cc=r(Gn);g(lt.$$.fragment,Cc),Cc.forEach(o),Dc.forEach(o),la=d(ys),Sn=a(ys,"SPAN",{});var Ic=r(Sn);da=n(Ic,"GenerationMixn"),Ic.forEach(o),ys.forEach(o),hs=d(i),k=a(i,"DIV",{class:!0});var L=r(k);g(dt.$$.fragment,L),ca=d(L),ct=a(L,"P",{});var js=r(ct);pa=n(js,"A class containing all functions for auto-regressive text generation, to be used as a mixin in "),ln=a(js,"A",{href:!0});var Wc=r(ln);ma=n(Wc,"PreTrainedModel"),Wc.forEach(o),ga=n(js,"."),js.forEach(o),_a=d(L),pt=a(L,"P",{});var Ms=r(pt);ua=n(Ms,"The class exposes "),dn=a(Ms,"A",{href:!0});var Bc=r(dn);ha=n(Bc,"generate()"),Bc.forEach(o),fa=n(Ms,", which can be used for:"),Ms.forEach(o),ba=d(L),F=a(L,"UL",{});var he=r(F);R=a(he,"LI",{});var Ge=r(R);$n=a(Ge,"EM",{});var Hc=r($n);xa=n(Hc,"greedy decoding"),Hc.forEach(o),ka=n(Ge," by calling "),cn=a(Ge,"A",{href:!0});var Rc=r(cn);va=n(Rc,"greedy_search()"),Rc.forEach(o),ya=n(Ge," if "),Fn=a(Ge,"CODE",{});var Uc=r(Fn);ja=n(Uc,"num_beams=1"),Uc.forEach(o),Ma=n(Ge,` and `),An=a(Ge,"CODE",{});var 
Vc=r(An);La=n(Vc,"do_sample=False"),Vc.forEach(o),wa=n(Ge,"."),Ge.forEach(o),Ta=d(he),U=a(he,"LI",{});var Se=r(U);zn=a(Se,"EM",{});var Kc=r(zn);Ea=n(Kc,"multinomial sampling"),Kc.forEach(o),Oa=n(Se," by calling "),pn=a(Se,"A",{href:!0});var Zc=r(pn);qa=n(Zc,"sample()"),Zc.forEach(o),Ga=n(Se," if "),Pn=a(Se,"CODE",{});var Xc=r(Pn);Sa=n(Xc,"num_beams=1"),Xc.forEach(o),$a=n(Se,` and `),Nn=a(Se,"CODE",{});var Jc=r(Nn);Fa=n(Jc,"do_sample=True"),Jc.forEach(o),Aa=n(Se,"."),Se.forEach(o),za=d(he),V=a(he,"LI",{});var $e=r(V);Dn=a($e,"EM",{});var Qc=r(Dn);Pa=n(Qc,"beam-search decoding"),Qc.forEach(o),Na=n($e," by calling "),mn=a($e,"A",{href:!0});var Yc=r(mn);Da=n(Yc,"beam_search()"),Yc.forEach(o),Ca=n($e," if "),Cn=a($e,"CODE",{});var ep=r(Cn);Ia=n(ep,"num_beams>1"),ep.forEach(o),Wa=n($e,` and `),In=a($e,"CODE",{});var tp=r(In);Ba=n(tp,"do_sample=False"),tp.forEach(o),Ha=n($e,"."),$e.forEach(o),Ra=d(he),K=a(he,"LI",{});var Fe=r(K);Wn=a(Fe,"EM",{});var np=r(Wn);Ua=n(np,"beam-search multinomial sampling"),np.forEach(o),Va=n(Fe," by calling "),gn=a(Fe,"A",{href:!0});var op=r(gn);Ka=n(op,"beam_sample()"),op.forEach(o),Za=n(Fe,` if `),Bn=a(Fe,"CODE",{});var sp=r(Bn);Xa=n(sp,"num_beams>1"),sp.forEach(o),Ja=n(Fe," and "),Hn=a(Fe,"CODE",{});var ap=r(Hn);Qa=n(ap,"do_sample=True"),ap.forEach(o),Ya=n(Fe,"."),Fe.forEach(o),er=d(he),Z=a(he,"LI",{});var Ae=r(Z);Rn=a(Ae,"EM",{});var rp=r(Rn);tr=n(rp,"diverse beam-search decoding"),rp.forEach(o),nr=n(Ae," by calling "),_n=a(Ae,"A",{href:!0});var ip=r(_n);or=n(ip,"group_beam_search()"),ip.forEach(o),sr=n(Ae,`, if `),Un=a(Ae,"CODE",{});var lp=r(Un);ar=n(lp,"num_beams>1"),lp.forEach(o),rr=n(Ae," and "),Vn=a(Ae,"CODE",{});var dp=r(Vn);ir=n(dp,"num_beam_groups>1"),dp.forEach(o),lr=n(Ae,"."),Ae.forEach(o),dr=d(he),X=a(he,"LI",{});var ze=r(X);Kn=a(ze,"EM",{});var cp=r(Kn);cr=n(cp,"constrained beam-search decoding"),cp.forEach(o),pr=n(ze," by calling "),un=a(ze,"A",{href:!0});var 
pp=r(un);mr=n(pp,"constrained_beam_search()"),pp.forEach(o),gr=n(ze,`, if `),Zn=a(ze,"CODE",{});var mp=r(Zn);_r=n(mp,"constraints!=None"),mp.forEach(o),ur=n(ze," or "),Xn=a(ze,"CODE",{});var gp=r(Xn);hr=n(gp,"force_words_ids!=None"),gp.forEach(o),fr=n(ze,"."),ze.forEach(o),he.forEach(o),br=d(L),x=a(L,"DIV",{class:!0});var M=r(x);g(mt.$$.fragment,M),xr=d(M),Jn=a(M,"P",{});var _p=r(Jn);kr=n(_p,`Generates sequences of token ids for models with a language modeling head. The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:`),_p.forEach(o),vr=d(M),A=a(M,"UL",{});var fe=r(A);J=a(fe,"LI",{});var Pe=r(J);Qn=a(Pe,"EM",{});var up=r(Qn);yr=n(up,"greedy decoding"),up.forEach(o),jr=n(Pe," by calling "),hn=a(Pe,"A",{href:!0});var hp=r(hn);Mr=n(hp,"greedy_search()"),hp.forEach(o),Lr=n(Pe," if "),Yn=a(Pe,"CODE",{});var fp=r(Yn);wr=n(fp,"num_beams=1"),fp.forEach(o),Tr=n(Pe,` and `),eo=a(Pe,"CODE",{});var bp=r(eo);Er=n(bp,"do_sample=False"),bp.forEach(o),Or=n(Pe,"."),Pe.forEach(o),qr=d(fe),Q=a(fe,"LI",{});var Ne=r(Q);to=a(Ne,"EM",{});var xp=r(to);Gr=n(xp,"multinomial sampling"),xp.forEach(o),Sr=n(Ne," by calling "),fn=a(Ne,"A",{href:!0});var kp=r(fn);$r=n(kp,"sample()"),kp.forEach(o),Fr=n(Ne," if "),no=a(Ne,"CODE",{});var vp=r(no);Ar=n(vp,"num_beams=1"),vp.forEach(o),zr=n(Ne,` and `),oo=a(Ne,"CODE",{});var yp=r(oo);Pr=n(yp,"do_sample=True"),yp.forEach(o),Nr=n(Ne,"."),Ne.forEach(o),Dr=d(fe),Y=a(fe,"LI",{});var De=r(Y);so=a(De,"EM",{});var jp=r(so);Cr=n(jp,"beam-search decoding"),jp.forEach(o),Ir=n(De," by calling "),bn=a(De,"A",{href:!0});var Mp=r(bn);Wr=n(Mp,"beam_search()"),Mp.forEach(o),Br=n(De," if "),ao=a(De,"CODE",{});var Lp=r(ao);Hr=n(Lp,"num_beams>1"),Lp.forEach(o),Rr=n(De,` and `),ro=a(De,"CODE",{});var wp=r(ro);Ur=n(wp,"do_sample=False"),wp.forEach(o),Vr=n(De,"."),De.forEach(o),Kr=d(fe),ee=a(fe,"LI",{});var Ce=r(ee);io=a(Ce,"EM",{});var Tp=r(io);Zr=n(Tp,"beam-search multinomial 
sampling"),Tp.forEach(o),Xr=n(Ce," by calling "),xn=a(Ce,"A",{href:!0});var Ep=r(xn);Jr=n(Ep,"beam_sample()"),Ep.forEach(o),Qr=n(Ce,` if `),lo=a(Ce,"CODE",{});var Op=r(lo);Yr=n(Op,"num_beams>1"),Op.forEach(o),ei=n(Ce," and "),co=a(Ce,"CODE",{});var qp=r(co);ti=n(qp,"do_sample=True"),qp.forEach(o),ni=n(Ce,"."),Ce.forEach(o),oi=d(fe),te=a(fe,"LI",{});var Ie=r(te);po=a(Ie,"EM",{});var Gp=r(po);si=n(Gp,"diverse beam-search decoding"),Gp.forEach(o),ai=n(Ie," by calling "),kn=a(Ie,"A",{href:!0});var Sp=r(kn);ri=n(Sp,"group_beam_search()"),Sp.forEach(o),ii=n(Ie,`, if `),mo=a(Ie,"CODE",{});var $p=r(mo);li=n($p,"num_beams>1"),$p.forEach(o),di=n(Ie," and "),go=a(Ie,"CODE",{});var Fp=r(go);ci=n(Fp,"num_beam_groups>1"),Fp.forEach(o),pi=n(Ie,"."),Ie.forEach(o),mi=d(fe),ne=a(fe,"LI",{});var We=r(ne);_o=a(We,"EM",{});var Ap=r(_o);gi=n(Ap,"constrained beam-search decoding"),Ap.forEach(o),_i=n(We,` by calling `),vn=a(We,"A",{href:!0});var zp=r(vn);ui=n(zp,"constrained_beam_search()"),zp.forEach(o),hi=n(We,", if "),uo=a(We,"CODE",{});var Pp=r(uo);fi=n(Pp,"constraints!=None"),Pp.forEach(o),bi=n(We,` or `),ho=a(We,"CODE",{});var Np=r(ho);xi=n(Np,"force_words_ids!=None"),Np.forEach(o),ki=n(We,"."),We.forEach(o),fe.forEach(o),vi=d(M),g(Xe.$$.fragment,M),yi=d(M),gt=a(M,"P",{});var Ls=r(gt);ji=n(Ls,"Most of these parameters are explained in more detail in "),_t=a(Ls,"A",{href:!0,rel:!0});var Dp=r(_t);Mi=n(Dp,`this blog post`),Dp.forEach(o),Li=n(Ls,"."),Ls.forEach(o),wi=d(M),fo=a(M,"P",{});var Cp=r(fo);Ti=n(Cp,"Examples:"),Cp.forEach(o),Ei=d(M),bo=a(M,"P",{});var Ip=r(bo);Oi=n(Ip,"Greedy Decoding:"),Ip.forEach(o),qi=d(M),g(ut.$$.fragment,M),Gi=d(M),xo=a(M,"P",{});var Wp=r(xo);Si=n(Wp,"Multinomial Sampling:"),Wp.forEach(o),$i=d(M),g(ht.$$.fragment,M),Fi=d(M),ko=a(M,"P",{});var Bp=r(ko);Ai=n(Bp,"Beam-search decoding:"),Bp.forEach(o),zi=d(M),g(ft.$$.fragment,M),M.forEach(o),Pi=d(L),oe=a(L,"DIV",{class:!0});var et=r(oe);g(bt.$$.fragment,et),Ni=d(et),xt=a(et,"P",{});var 
ws=r(xt);Di=n(ws,"Generates sequences of token ids for models with a language modeling head using "),vo=a(ws,"STRONG",{});var Hp=r(vo);Ci=n(Hp,"greedy decoding"),Hp.forEach(o),Ii=n(ws,` and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.`),ws.forEach(o),Wi=d(et),yo=a(et,"P",{});var Rp=r(yo);Bi=n(Rp,"Examples:"),Rp.forEach(o),Hi=d(et),g(kt.$$.fragment,et),et.forEach(o),Ri=d(L),se=a(L,"DIV",{class:!0});var tt=r(se);g(vt.$$.fragment,tt),Ui=d(tt),yt=a(tt,"P",{});var Ts=r(yt);Vi=n(Ts,"Generates sequences of token ids for models with a language modeling head using "),jo=a(Ts,"STRONG",{});var Up=r(jo);Ki=n(Up,"multinomial sampling"),Up.forEach(o),Zi=n(Ts,` and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.`),Ts.forEach(o),Xi=d(tt),Mo=a(tt,"P",{});var Vp=r(Mo);Ji=n(Vp,"Examples:"),Vp.forEach(o),Qi=d(tt),g(jt.$$.fragment,tt),tt.forEach(o),Yi=d(L),ae=a(L,"DIV",{class:!0});var nt=r(ae);g(Mt.$$.fragment,nt),el=d(nt),Lt=a(nt,"P",{});var Es=r(Lt);tl=n(Es,"Generates sequences of token ids for models with a language modeling head using "),Lo=a(Es,"STRONG",{});var Kp=r(Lo);nl=n(Kp,"beam search decoding"),Kp.forEach(o),ol=n(Es,` and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.`),Es.forEach(o),sl=d(nt),wo=a(nt,"P",{});var Zp=r(wo);al=n(Zp,"Examples:"),Zp.forEach(o),rl=d(nt),g(wt.$$.fragment,nt),nt.forEach(o),il=d(L),re=a(L,"DIV",{class:!0});var ot=r(re);g(Tt.$$.fragment,ot),ll=d(ot),Et=a(ot,"P",{});var Os=r(Et);dl=n(Os,"Generates sequences of token ids for models with a language modeling head using "),To=a(Os,"STRONG",{});var Xp=r(To);cl=n(Xp,`beam search multinomial sampling`),Xp.forEach(o),pl=n(Os," and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models."),Os.forEach(o),ml=d(ot),Eo=a(ot,"P",{});var Jp=r(Eo);gl=n(Jp,"Examples:"),Jp.forEach(o),_l=d(ot),g(Ot.$$.fragment,ot),ot.forEach(o),ul=d(L),ie=a(L,"DIV",{class:!0});var 
st=r(ie);g(qt.$$.fragment,st),hl=d(st),Gt=a(st,"P",{});var qs=r(Gt);fl=n(qs,"Generates sequences of token ids for models with a language modeling head using "),Oo=a(qs,"STRONG",{});var Qp=r(Oo);bl=n(Qp,`diverse beam search decoding`),Qp.forEach(o),xl=n(qs," and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models."),qs.forEach(o),kl=d(st),qo=a(st,"P",{});var Yp=r(qo);vl=n(Yp,"Examples:"),Yp.forEach(o),yl=d(st),g(St.$$.fragment,st),st.forEach(o),jl=d(L),le=a(L,"DIV",{class:!0});var at=r(le);g($t.$$.fragment,at),Ml=d(at),Ft=a(at,"P",{});var Gs=r(Ft);Ll=n(Gs,"Generates sequences of token ids for models with a language modeling head using "),Go=a(Gs,"STRONG",{});var em=r(Go);wl=n(em,`constrained beam search decoding`),em.forEach(o),Tl=n(Gs," and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models."),Gs.forEach(o),El=d(at),So=a(at,"P",{});var tm=r(So);Ol=n(tm,"Examples:"),tm.forEach(o),ql=d(at),g(At.$$.fragment,at),at.forEach(o),L.forEach(o),fs=d(i),Te=a(i,"H2",{class:!0});var Ss=r(Te);Je=a(Ss,"A",{id:!0,class:!0,href:!0});var nm=r(Je);$o=a(nm,"SPAN",{});var om=r($o);g(zt.$$.fragment,om),om.forEach(o),nm.forEach(o),Gl=d(Ss),Fo=a(Ss,"SPAN",{});var sm=r(Fo);Sl=n(sm,"TFGenerationMixn"),sm.forEach(o),Ss.forEach(o),bs=d(i),ke=a(i,"DIV",{class:!0});var wn=r(ke);g(Pt.$$.fragment,wn),$l=d(wn),Nt=a(wn,"P",{});var $s=r(Nt);Fl=n($s,"A class containing all of the functions supporting generation, to be used as a mixin in "),yn=a($s,"A",{href:!0});var am=r(yn);Al=n(am,"TFPreTrainedModel"),am.forEach(o),zl=n($s,"."),$s.forEach(o),Pl=d(wn),O=a(wn,"DIV",{class:!0});var D=r(O);g(Dt.$$.fragment,D),Nl=d(D),Ao=a(D,"P",{});var rm=r(Ao);Dl=n(rm,`Generates sequences for models with a language modeling head. 
The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.`),rm.forEach(o),Cl=d(D),Ct=a(D,"P",{});var Fs=r(Ct);Il=n(Fs,"Adapted in part from "),It=a(Fs,"A",{href:!0,rel:!0});var im=r(It);Wl=n(im,`Facebook\u2019s XLM beam search code`),im.forEach(o),Bl=n(Fs,"."),Fs.forEach(o),Hl=d(D),ve=a(D,"P",{});var rt=r(ve);Rl=n(rt,"Apart from "),zo=a(rt,"CODE",{});var lm=r(zo);Ul=n(lm,"input_ids"),lm.forEach(o),Vl=n(rt," and "),Po=a(rt,"CODE",{});var dm=r(Po);Kl=n(dm,"attention_mask"),dm.forEach(o),Zl=n(rt,`, all the arguments below will default to the value of the attribute of the same name inside the `),jn=a(rt,"A",{href:!0});var cm=r(jn);Xl=n(cm,"PretrainedConfig"),cm.forEach(o),Jl=n(rt,` of the model. The default values indicated are the default values of those config.`),rt.forEach(o),Ql=d(D),Wt=a(D,"P",{});var As=r(Wt);Yl=n(As,"Most of these parameters are explained in more detail in "),Bt=a(As,"A",{href:!0,rel:!0});var pm=r(Bt);ed=n(pm,`this blog post`),pm.forEach(o),td=n(As,"."),As.forEach(o),nd=d(D),No=a(D,"P",{});var mm=r(No);od=n(mm,"Examples:"),mm.forEach(o),sd=d(D),g(Ht.$$.fragment,D),D.forEach(o),wn.forEach(o),xs=d(i),Ee=a(i,"H2",{class:!0});var zs=r(Ee);Qe=a(zs,"A",{id:!0,class:!0,href:!0});var gm=r(Qe);Do=a(gm,"SPAN",{});var _m=r(Do);g(Rt.$$.fragment,_m),_m.forEach(o),gm.forEach(o),ad=d(zs),Co=a(zs,"SPAN",{});var um=r(Co);rd=n(um,"FlaxGenerationMixn"),um.forEach(o),zs.forEach(o),ks=d(i),z=a(i,"DIV",{class:!0});var je=r(z);g(Ut.$$.fragment,je),id=d(je),Vt=a(je,"P",{});var Ps=r(Vt);ld=n(Ps,`A class containing all functions for auto-regressive text generation, to be used as a mixin in `),Mn=a(Ps,"A",{href:!0});var hm=r(Mn);dd=n(hm,"FlaxPreTrainedModel"),hm.forEach(o),cd=n(Ps,"."),Ps.forEach(o),pd=d(je),Kt=a(je,"P",{});var Ns=r(Kt);md=n(Ns,"The class exposes "),Ln=a(Ns,"A",{href:!0});var fm=r(Ln);gd=n(fm,"generate()"),fm.forEach(o),_d=n(Ns,", which can be used 
for:"),Ns.forEach(o),ud=d(je),Oe=a(je,"UL",{});var Tn=r(Oe);de=a(Tn,"LI",{});var Be=r(de);Io=a(Be,"EM",{});var bm=r(Io);hd=n(bm,"greedy decoding"),bm.forEach(o),fd=n(Be," by calling "),Wo=a(Be,"CODE",{});var xm=r(Wo);bd=n(xm,"_greedy_search()"),xm.forEach(o),xd=n(Be,`if `),Bo=a(Be,"CODE",{});var km=r(Bo);kd=n(km,"num_beams=1"),km.forEach(o),vd=n(Be," and "),Ho=a(Be,"CODE",{});var vm=r(Ho);yd=n(vm,"do_sample=False"),vm.forEach(o),jd=n(Be,"."),Be.forEach(o),Md=d(Tn),ce=a(Tn,"LI",{});var He=r(ce);Ro=a(He,"EM",{});var ym=r(Ro);Ld=n(ym,"multinomial sampling"),ym.forEach(o),wd=n(He," by calling "),Uo=a(He,"CODE",{});var jm=r(Uo);Td=n(jm,"_sample()"),jm.forEach(o),Ed=n(He,"if "),Vo=a(He,"CODE",{});var Mm=r(Vo);Od=n(Mm,"num_beams=1"),Mm.forEach(o),qd=n(He,` and `),Ko=a(He,"CODE",{});var Lm=r(Ko);Gd=n(Lm,"do_sample=True"),Lm.forEach(o),Sd=n(He,"."),He.forEach(o),$d=d(Tn),pe=a(Tn,"LI",{});var Re=r(pe);Zo=a(Re,"EM",{});var wm=r(Zo);Fd=n(wm,"beam-search decoding"),wm.forEach(o),Ad=n(Re," by calling "),Xo=a(Re,"CODE",{});var Tm=r(Xo);zd=n(Tm,"_beam_search"),Tm.forEach(o),Pd=n(Re," if "),Jo=a(Re,"CODE",{});var Em=r(Jo);Nd=n(Em,"num_beams>1"),Em.forEach(o),Dd=n(Re,` and `),Qo=a(Re,"CODE",{});var Om=r(Qo);Cd=n(Om,"do_sample=False"),Om.forEach(o),Id=n(Re,"."),Re.forEach(o),Tn.forEach(o),Wd=d(je),q=a(je,"DIV",{class:!0});var C=r(q);g(Zt.$$.fragment,C),Bd=d(C),Yo=a(C,"P",{});var qm=r(Yo);Hd=n(qm,`Generates sequences of token ids for models with a language modeling head. 
The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:`),qm.forEach(o),Rd=d(C),qe=a(C,"UL",{});var En=r(qe);me=a(En,"LI",{});var Ue=r(me);es=a(Ue,"EM",{});var Gm=r(es);Ud=n(Gm,"greedy decoding"),Gm.forEach(o),Vd=n(Ue," by calling "),ts=a(Ue,"CODE",{});var Sm=r(ts);Kd=n(Sm,"_greedy_search()"),Sm.forEach(o),Zd=n(Ue,`if `),ns=a(Ue,"CODE",{});var $m=r(ns);Xd=n($m,"num_beams=1"),$m.forEach(o),Jd=n(Ue," and "),os=a(Ue,"CODE",{});var Fm=r(os);Qd=n(Fm,"do_sample=False"),Fm.forEach(o),Yd=n(Ue,"."),Ue.forEach(o),ec=d(En),ge=a(En,"LI",{});var Ve=r(ge);ss=a(Ve,"EM",{});var Am=r(ss);tc=n(Am,"multinomial sampling"),Am.forEach(o),nc=n(Ve," by calling "),as=a(Ve,"CODE",{});var zm=r(as);oc=n(zm,"_sample()"),zm.forEach(o),sc=n(Ve,"if "),rs=a(Ve,"CODE",{});var Pm=r(rs);ac=n(Pm,"num_beams=1"),Pm.forEach(o),rc=n(Ve,` and `),is=a(Ve,"CODE",{});var Nm=r(is);ic=n(Nm,"do_sample=True"),Nm.forEach(o),lc=n(Ve,"."),Ve.forEach(o),dc=d(En),_e=a(En,"LI",{});var Ke=r(_e);ls=a(Ke,"EM",{});var Dm=r(ls);cc=n(Dm,"beam-search decoding"),Dm.forEach(o),pc=n(Ke," by calling "),ds=a(Ke,"CODE",{});var Cm=r(ds);mc=n(Cm,"_beam_search"),Cm.forEach(o),gc=n(Ke," if "),cs=a(Ke,"CODE",{});var Im=r(cs);_c=n(Im,"num_beams>1"),Im.forEach(o),uc=n(Ke,` and `),ps=a(Ke,"CODE",{});var Wm=r(ps);hc=n(Wm,"do_sample=False"),Wm.forEach(o),fc=n(Ke,"."),Ke.forEach(o),En.forEach(o),bc=d(C),g(Ye.$$.fragment,C),xc=d(C),Xt=a(C,"P",{});var Ds=r(Xt);kc=n(Ds,"Most of these parameters are explained in more detail in "),Jt=a(Ds,"A",{href:!0,rel:!0});var Bm=r(Jt);vc=n(Bm,`this blog post`),Bm.forEach(o),yc=n(Ds,"."),Ds.forEach(o),jc=d(C),ms=a(C,"P",{});var Hm=r(ms);Mc=n(Hm,"Examples:"),Hm.forEach(o),Lc=d(C),g(Qt.$$.fragment,C),C.forEach(o),je.forEach(o),this.h()},h(){c(p,"name","hf:doc:metadata"),c(p,"content",JSON.stringify(Ym)),c(T,"id","generation"),c(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(T,"href","#generation"),c(j,"class","relative group"),c(b,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),c(H,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin.generate"),c(en,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin.generate"),c(tn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin"),c(nn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin"),c(on,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin"),c(sn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(an,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),c(rn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Ze,"id","transformers.generation_utils.GenerationMixin"),c(Ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ze,"href","#transformers.generation_utils.GenerationMixin"),c(we,"class","relative 
group"),c(ln,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),c(dn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),c(cn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search"),c(pn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample"),c(mn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search"),c(gn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample"),c(_n,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search"),c(un,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search"),c(hn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search"),c(fn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample"),c(bn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search"),c(xn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample"),c(kn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search"),c(vn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search"),c(_t,"href","https://huggingface.co/blog/how-to-generate"),c(_t,"rel","nofollow"),c(x,"class","docstring"),c(oe,"class","docstring"),c(se,"cla
ss","docstring"),c(ae,"class","docstring"),c(re,"class","docstring"),c(ie,"class","docstring"),c(le,"class","docstring"),c(k,"class","docstring"),c(Je,"id","transformers.generation_tf_utils.TFGenerationMixin"),c(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Je,"href","#transformers.generation_tf_utils.TFGenerationMixin"),c(Te,"class","relative group"),c(yn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),c(It,"href","https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529"),c(It,"rel","nofollow"),c(jn,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),c(Bt,"href","https://huggingface.co/blog/how-to-generate"),c(Bt,"rel","nofollow"),c(O,"class","docstring"),c(ke,"class","docstring"),c(Qe,"id","transformers.generation_flax_utils.FlaxGenerationMixin"),c(Qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Qe,"href","#transformers.generation_flax_utils.FlaxGenerationMixin"),c(Ee,"class","relative 
group"),c(Mn,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),c(Ln,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin.generate"),c(Jt,"href","https://huggingface.co/blog/how-to-generate"),c(Jt,"rel","nofollow"),c(q,"class","docstring"),c(z,"class","docstring")},m(i,y){e(document.head,p),w(i,P,y),w(i,j,y),e(j,T),e(T,I),_(E,I,null),e(j,xe),e(j,W),e(W,G),w(i,B,y),w(i,v,y),e(v,$),e(v,b),e(b,Me),e(v,Le),e(v,H),e(H,Is),e(v,Ws),e(v,en),e(en,Bs),e(v,Hs),e(v,tn),e(tn,Rs),e(v,Us),e(v,nn),e(nn,Vs),e(v,Ks),e(v,on),e(on,Zs),e(v,Xs),w(i,_s,y),w(i,S,y),e(S,Js),e(S,On),e(On,Qs),e(S,Ys),e(S,qn),e(qn,ea),e(S,ta),e(S,sn),e(sn,na),e(S,oa),e(S,an),e(an,sa),e(S,aa),e(S,rn),e(rn,ra),e(S,ia),w(i,us,y),w(i,we,y),e(we,Ze),e(Ze,Gn),_(lt,Gn,null),e(we,la),e(we,Sn),e(Sn,da),w(i,hs,y),w(i,k,y),_(dt,k,null),e(k,ca),e(k,ct),e(ct,pa),e(ct,ln),e(ln,ma),e(ct,ga),e(k,_a),e(k,pt),e(pt,ua),e(pt,dn),e(dn,ha),e(pt,fa),e(k,ba),e(k,F),e(F,R),e(R,$n),e($n,xa),e(R,ka),e(R,cn),e(cn,va),e(R,ya),e(R,Fn),e(Fn,ja),e(R,Ma),e(R,An),e(An,La),e(R,wa),e(F,Ta),e(F,U),e(U,zn),e(zn,Ea),e(U,Oa),e(U,pn),e(pn,qa),e(U,Ga),e(U,Pn),e(Pn,Sa),e(U,$a),e(U,Nn),e(Nn,Fa),e(U,Aa),e(F,za),e(F,V),e(V,Dn),e(Dn,Pa),e(V,Na),e(V,mn),e(mn,Da),e(V,Ca),e(V,Cn),e(Cn,Ia),e(V,Wa),e(V,In),e(In,Ba),e(V,Ha),e(F,Ra),e(F,K),e(K,Wn),e(Wn,Ua),e(K,Va),e(K,gn),e(gn,Ka),e(K,Za),e(K,Bn),e(Bn,Xa),e(K,Ja),e(K,Hn),e(Hn,Qa),e(K,Ya),e(F,er),e(F,Z),e(Z,Rn),e(Rn,tr),e(Z,nr),e(Z,_n),e(_n,or),e(Z,sr),e(Z,Un),e(Un,ar),e(Z,rr),e(Z,Vn),e(Vn,ir),e(Z,lr),e(F,dr),e(F,X),e(X,Kn),e(Kn,cr),e(X,pr),e(X,un),e(un,mr),e(X,gr),e(X,Zn),e(Zn,_r),e(X,ur),e(X,Xn),e(Xn,hr),e(X,fr),e(k,br),e(k,x),_(mt,x,null),e(x,xr),e(x,Jn),e(Jn,kr),e(x,vr),e(x,A),e(A,J),e(J,Qn),e(Qn,yr),e(J,jr),e(J,hn),e(hn,Mr),e(J,Lr),e(J,Yn),e(Yn,wr),e(J,Tr),e(J,eo),e(eo,Er),e(J,Or),e(A,qr),e(A,Q),e(Q,to),e(to,Gr),e(Q,Sr),e(Q,fn),e(fn,$r),e(Q,Fr),e(Q,no),e(no,Ar),e(Q,zr),e(Q,oo),e(oo,Pr),e(Q,Nr),e(A,D
r),e(A,Y),e(Y,so),e(so,Cr),e(Y,Ir),e(Y,bn),e(bn,Wr),e(Y,Br),e(Y,ao),e(ao,Hr),e(Y,Rr),e(Y,ro),e(ro,Ur),e(Y,Vr),e(A,Kr),e(A,ee),e(ee,io),e(io,Zr),e(ee,Xr),e(ee,xn),e(xn,Jr),e(ee,Qr),e(ee,lo),e(lo,Yr),e(ee,ei),e(ee,co),e(co,ti),e(ee,ni),e(A,oi),e(A,te),e(te,po),e(po,si),e(te,ai),e(te,kn),e(kn,ri),e(te,ii),e(te,mo),e(mo,li),e(te,di),e(te,go),e(go,ci),e(te,pi),e(A,mi),e(A,ne),e(ne,_o),e(_o,gi),e(ne,_i),e(ne,vn),e(vn,ui),e(ne,hi),e(ne,uo),e(uo,fi),e(ne,bi),e(ne,ho),e(ho,xi),e(ne,ki),e(x,vi),_(Xe,x,null),e(x,yi),e(x,gt),e(gt,ji),e(gt,_t),e(_t,Mi),e(gt,Li),e(x,wi),e(x,fo),e(fo,Ti),e(x,Ei),e(x,bo),e(bo,Oi),e(x,qi),_(ut,x,null),e(x,Gi),e(x,xo),e(xo,Si),e(x,$i),_(ht,x,null),e(x,Fi),e(x,ko),e(ko,Ai),e(x,zi),_(ft,x,null),e(k,Pi),e(k,oe),_(bt,oe,null),e(oe,Ni),e(oe,xt),e(xt,Di),e(xt,vo),e(vo,Ci),e(xt,Ii),e(oe,Wi),e(oe,yo),e(yo,Bi),e(oe,Hi),_(kt,oe,null),e(k,Ri),e(k,se),_(vt,se,null),e(se,Ui),e(se,yt),e(yt,Vi),e(yt,jo),e(jo,Ki),e(yt,Zi),e(se,Xi),e(se,Mo),e(Mo,Ji),e(se,Qi),_(jt,se,null),e(k,Yi),e(k,ae),_(Mt,ae,null),e(ae,el),e(ae,Lt),e(Lt,tl),e(Lt,Lo),e(Lo,nl),e(Lt,ol),e(ae,sl),e(ae,wo),e(wo,al),e(ae,rl),_(wt,ae,null),e(k,il),e(k,re),_(Tt,re,null),e(re,ll),e(re,Et),e(Et,dl),e(Et,To),e(To,cl),e(Et,pl),e(re,ml),e(re,Eo),e(Eo,gl),e(re,_l),_(Ot,re,null),e(k,ul),e(k,ie),_(qt,ie,null),e(ie,hl),e(ie,Gt),e(Gt,fl),e(Gt,Oo),e(Oo,bl),e(Gt,xl),e(ie,kl),e(ie,qo),e(qo,vl),e(ie,yl),_(St,ie,null),e(k,jl),e(k,le),_($t,le,null),e(le,Ml),e(le,Ft),e(Ft,Ll),e(Ft,Go),e(Go,wl),e(Ft,Tl),e(le,El),e(le,So),e(So,Ol),e(le,ql),_(At,le,null),w(i,fs,y),w(i,Te,y),e(Te,Je),e(Je,$o),_(zt,$o,null),e(Te,Gl),e(Te,Fo),e(Fo,Sl),w(i,bs,y),w(i,ke,y),_(Pt,ke,null),e(ke,$l),e(ke,Nt),e(Nt,Fl),e(Nt,yn),e(yn,Al),e(Nt,zl),e(ke,Pl),e(ke,O),_(Dt,O,null),e(O,Nl),e(O,Ao),e(Ao,Dl),e(O,Cl),e(O,Ct),e(Ct,Il),e(Ct,It),e(It,Wl),e(Ct,Bl),e(O,Hl),e(O,ve),e(ve,Rl),e(ve,zo),e(zo,Ul),e(ve,Vl),e(ve,Po),e(Po,Kl),e(ve,Zl),e(ve,jn),e(jn,Xl),e(ve,Jl),e(O,Ql),e(O,Wt),e(Wt,Yl),e(Wt,Bt),e(Bt,ed),e(Wt,td),e(O,nd),e(O,No),e(No,od),e(O,sd),_(Ht,O,null),
w(i,xs,y),w(i,Ee,y),e(Ee,Qe),e(Qe,Do),_(Rt,Do,null),e(Ee,ad),e(Ee,Co),e(Co,rd),w(i,ks,y),w(i,z,y),_(Ut,z,null),e(z,id),e(z,Vt),e(Vt,ld),e(Vt,Mn),e(Mn,dd),e(Vt,cd),e(z,pd),e(z,Kt),e(Kt,md),e(Kt,Ln),e(Ln,gd),e(Kt,_d),e(z,ud),e(z,Oe),e(Oe,de),e(de,Io),e(Io,hd),e(de,fd),e(de,Wo),e(Wo,bd),e(de,xd),e(de,Bo),e(Bo,kd),e(de,vd),e(de,Ho),e(Ho,yd),e(de,jd),e(Oe,Md),e(Oe,ce),e(ce,Ro),e(Ro,Ld),e(ce,wd),e(ce,Uo),e(Uo,Td),e(ce,Ed),e(ce,Vo),e(Vo,Od),e(ce,qd),e(ce,Ko),e(Ko,Gd),e(ce,Sd),e(Oe,$d),e(Oe,pe),e(pe,Zo),e(Zo,Fd),e(pe,Ad),e(pe,Xo),e(Xo,zd),e(pe,Pd),e(pe,Jo),e(Jo,Nd),e(pe,Dd),e(pe,Qo),e(Qo,Cd),e(pe,Id),e(z,Wd),e(z,q),_(Zt,q,null),e(q,Bd),e(q,Yo),e(Yo,Hd),e(q,Rd),e(q,qe),e(qe,me),e(me,es),e(es,Ud),e(me,Vd),e(me,ts),e(ts,Kd),e(me,Zd),e(me,ns),e(ns,Xd),e(me,Jd),e(me,os),e(os,Qd),e(me,Yd),e(qe,ec),e(qe,ge),e(ge,ss),e(ss,tc),e(ge,nc),e(ge,as),e(as,oc),e(ge,sc),e(ge,rs),e(rs,ac),e(ge,rc),e(ge,is),e(is,ic),e(ge,lc),e(qe,dc),e(qe,_e),e(_e,ls),e(ls,cc),e(_e,pc),e(_e,ds),e(ds,mc),e(_e,gc),e(_e,cs),e(cs,_c),e(_e,uc),e(_e,ps),e(ps,hc),e(_e,fc),e(q,bc),_(Ye,q,null),e(q,xc),e(q,Xt),e(Xt,kc),e(Xt,Jt),e(Jt,vc),e(Xt,yc),e(q,jc),e(q,ms),e(ms,Mc),e(q,Lc),_(Qt,q,null),vs=!0},p(i,[y]){const Yt={};y&2&&(Yt.$$scope={dirty:y,ctx:i}),Xe.$set(Yt);const 
gs={};y&2&&(gs.$$scope={dirty:y,ctx:i}),Ye.$set(gs)},i(i){vs||(u(E.$$.fragment,i),u(lt.$$.fragment,i),u(dt.$$.fragment,i),u(mt.$$.fragment,i),u(Xe.$$.fragment,i),u(ut.$$.fragment,i),u(ht.$$.fragment,i),u(ft.$$.fragment,i),u(bt.$$.fragment,i),u(kt.$$.fragment,i),u(vt.$$.fragment,i),u(jt.$$.fragment,i),u(Mt.$$.fragment,i),u(wt.$$.fragment,i),u(Tt.$$.fragment,i),u(Ot.$$.fragment,i),u(qt.$$.fragment,i),u(St.$$.fragment,i),u($t.$$.fragment,i),u(At.$$.fragment,i),u(zt.$$.fragment,i),u(Pt.$$.fragment,i),u(Dt.$$.fragment,i),u(Ht.$$.fragment,i),u(Rt.$$.fragment,i),u(Ut.$$.fragment,i),u(Zt.$$.fragment,i),u(Ye.$$.fragment,i),u(Qt.$$.fragment,i),vs=!0)},o(i){h(E.$$.fragment,i),h(lt.$$.fragment,i),h(dt.$$.fragment,i),h(mt.$$.fragment,i),h(Xe.$$.fragment,i),h(ut.$$.fragment,i),h(ht.$$.fragment,i),h(ft.$$.fragment,i),h(bt.$$.fragment,i),h(kt.$$.fragment,i),h(vt.$$.fragment,i),h(jt.$$.fragment,i),h(Mt.$$.fragment,i),h(wt.$$.fragment,i),h(Tt.$$.fragment,i),h(Ot.$$.fragment,i),h(qt.$$.fragment,i),h(St.$$.fragment,i),h($t.$$.fragment,i),h(At.$$.fragment,i),h(zt.$$.fragment,i),h(Pt.$$.fragment,i),h(Dt.$$.fragment,i),h(Ht.$$.fragment,i),h(Rt.$$.fragment,i),h(Ut.$$.fragment,i),h(Zt.$$.fragment,i),h(Ye.$$.fragment,i),h(Qt.$$.fragment,i),vs=!1},d(i){o(p),i&&o(P),i&&o(j),f(E),i&&o(B),i&&o(v),i&&o(_s),i&&o(S),i&&o(us),i&&o(we),f(lt),i&&o(hs),i&&o(k),f(dt),f(mt),f(Xe),f(ut),f(ht),f(ft),f(bt),f(kt),f(vt),f(jt),f(Mt),f(wt),f(Tt),f(Ot),f(qt),f(St),f($t),f(At),i&&o(fs),i&&o(Te),f(zt),i&&o(bs),i&&o(ke),f(Pt),f(Dt),f(Ht),i&&o(xs),i&&o(Ee),f(Rt),i&&o(ks),i&&o(z),f(Ut),f(Zt),f(Ye),f(Qt)}}}const Ym={local:"generation",sections:[{local:"transformers.generation_utils.GenerationMixin",title:"GenerationMixn"},{local:"transformers.generation_tf_utils.TFGenerationMixin",title:"TFGenerationMixn"},{local:"transformers.generation_flax_utils.FlaxGenerationMixin",title:"FlaxGenerationMixn"}],title:"Generation"};function eg(it,p,P){let{fw:j}=p;return it.$$set=T=>{"fw"in T&&P(0,j=T.fw)},[j]}class ig extends 
Um{constructor(p){super();Vm(this,p,eg,Qm,Km,{fw:0})}}export{ig as default,Ym as metadata};
400
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/main_classes/trainer.mdx-a51a0aac.js
import{S as sL,i as iL,s as lL,e as n,k as l,w as h,t as r,M as dL,c as s,d as o,m as d,a as i,x as u,h as a,b as m,F as e,g as p,y as f,q as g,o as _,B as v}from"../../chunks/vendor-4833417e.js";import{T as Kb}from"../../chunks/Tip-fffd6df1.js";import{D as $}from"../../chunks/Docstring-4f315ed9.js";import{C as P}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as se}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function cL(Ye){let T,L,x,S,fe,R,W,V,ge,ee,G,ie,le,te,de,Y,Ze,_e,N,C,at,oe,nt,st,ve,sa,ia,Je,Ae,la,be,da,ca;return{c(){T=n("p"),L=r("The "),x=n("a"),S=r("Trainer"),fe=r(` class is optimized for \u{1F917} Transformers models and can have surprising behaviors when you use it on other models. When using it on your own model, make sure:`),R=l(),W=n("ul"),V=n("li"),ge=r("your model always return tuples or subclasses of "),ee=n("a"),G=r("ModelOutput"),ie=r("."),le=l(),te=n("li"),de=r("your model can compute the loss if a "),Y=n("code"),Ze=r("labels"),_e=r(` argument is provided and that loss is returned as the first element of the tuple (if your model returns tuples)`),N=l(),C=n("li"),at=r("your model can accept multiple label arguments (use the "),oe=n("code"),nt=r("label_names"),st=r(" in your "),ve=n("a"),sa=r("TrainingArguments"),ia=r(" to indicate their name to the "),Je=n("a"),Ae=r("Trainer"),la=r(") but none of them should be named "),be=n("code"),da=r('"label"'),ca=r("."),this.h()},l(Z){T=s(Z,"P",{});var H=i(T);L=a(H,"The "),x=s(H,"A",{href:!0});var Qs=i(x);S=a(Qs,"Trainer"),Qs.forEach(o),fe=a(H,` class is optimized for \u{1F917} Transformers models and can have surprising behaviors when you use it on other models. 
When using it on your own model, make sure:`),H.forEach(o),R=d(Z),W=s(Z,"UL",{});var ye=i(W);V=s(ye,"LI",{});var Lo=i(V);ge=a(Lo,"your model always return tuples or subclasses of "),ee=s(Lo,"A",{href:!0});var ei=i(ee);G=a(ei,"ModelOutput"),ei.forEach(o),ie=a(Lo,"."),Lo.forEach(o),le=d(ye),te=s(ye,"LI",{});var Kt=i(te);de=a(Kt,"your model can compute the loss if a "),Y=s(Kt,"CODE",{});var D=i(Y);Ze=a(D,"labels"),D.forEach(o),_e=a(Kt,` argument is provided and that loss is returned as the first element of the tuple (if your model returns tuples)`),Kt.forEach(o),N=d(ye),C=s(ye,"LI",{});var B=i(C);at=a(B,"your model can accept multiple label arguments (use the "),oe=s(B,"CODE",{});var Fo=i(oe);nt=a(Fo,"label_names"),Fo.forEach(o),st=a(B," in your "),ve=s(B,"A",{href:!0});var ti=i(ve);sa=a(ti,"TrainingArguments"),ti.forEach(o),ia=a(B," to indicate their name to the "),Je=s(B,"A",{href:!0});var oi=i(Je);Ae=a(oi,"Trainer"),oi.forEach(o),la=a(B,") but none of them should be named "),be=s(B,"CODE",{});var ri=i(be);da=a(ri,'"label"'),ri.forEach(o),ca=a(B,"."),B.forEach(o),ye.forEach(o),this.h()},h(){m(x,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(ee,"href","/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput"),m(ve,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),m(Je,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer")},m(Z,H){p(Z,T,H),e(T,L),e(T,x),e(x,S),e(T,fe),p(Z,R,H),p(Z,W,H),e(W,V),e(V,ge),e(V,ee),e(ee,G),e(V,ie),e(W,le),e(W,te),e(te,de),e(te,Y),e(Y,Ze),e(te,_e),e(W,N),e(W,C),e(C,at),e(C,oe),e(oe,nt),e(C,st),e(C,ve),e(ve,sa),e(C,ia),e(C,Je),e(Je,Ae),e(C,la),e(C,be),e(be,da),e(C,ca)},d(Z){Z&&o(T),Z&&o(R),Z&&o(W)}}}function pL(Ye){let T,L,x,S,fe,R,W,V,ge,ee,G,ie,le,te,de,Y,Ze;return{c(){T=n("p"),L=r("To use this method, you need to have provided a "),x=n("code"),S=r("model_init"),fe=r(" when initializing your 
"),R=n("a"),W=r("Trainer"),V=r(`: we need to reinitialize the model at each new run. This is incompatible with the `),ge=n("code"),ee=r("optimizers"),G=r(` argument, so you need to subclass `),ie=n("a"),le=r("Trainer"),te=r(" and override the method "),de=n("a"),Y=r("create_optimizer_and_scheduler()"),Ze=r(` for custom optimizer/scheduler.`),this.h()},l(_e){T=s(_e,"P",{});var N=i(T);L=a(N,"To use this method, you need to have provided a "),x=s(N,"CODE",{});var C=i(x);S=a(C,"model_init"),C.forEach(o),fe=a(N," when initializing your "),R=s(N,"A",{href:!0});var at=i(R);W=a(at,"Trainer"),at.forEach(o),V=a(N,`: we need to reinitialize the model at each new run. This is incompatible with the `),ge=s(N,"CODE",{});var oe=i(ge);ee=a(oe,"optimizers"),oe.forEach(o),G=a(N,` argument, so you need to subclass `),ie=s(N,"A",{href:!0});var nt=i(ie);le=a(nt,"Trainer"),nt.forEach(o),te=a(N," and override the method "),de=s(N,"A",{href:!0});var st=i(de);Y=a(st,"create_optimizer_and_scheduler()"),st.forEach(o),Ze=a(N,` for custom optimizer/scheduler.`),N.forEach(o),this.h()},h(){m(R,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(ie,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(de,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.create_optimizer_and_scheduler")},m(_e,N){p(_e,T,N),e(T,L),e(T,x),e(x,S),e(T,fe),e(T,R),e(R,W),e(T,V),e(T,ge),e(ge,ee),e(T,G),e(T,ie),e(ie,le),e(T,te),e(T,de),e(de,Y),e(T,Ze)},d(_e){_e&&o(T)}}}function mL(Ye){let T,L;return{c(){T=n("p"),L=r(`If your predictions or labels have different sequence length (for instance because you\u2019re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. 
The padding index is -100.`)},l(x){T=s(x,"P",{});var S=i(T);L=a(S,`If your predictions or labels have different sequence length (for instance because you\u2019re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100.`),S.forEach(o)},m(x,S){p(x,T,S),e(T,L)},d(x){x&&o(T)}}}function hL(Ye){let T,L;return{c(){T=n("p"),L=r(`If your predictions or labels have different sequence lengths (for instance because you\u2019re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100.`)},l(x){T=s(x,"P",{});var S=i(T);L=a(S,`If your predictions or labels have different sequence lengths (for instance because you\u2019re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100.`),S.forEach(o)},m(x,S){p(x,T,S),e(T,L)},d(x){x&&o(T)}}}function uL(Ye){let 
T,L,x,S,fe,R,W,V,ge,ee,G,ie,le,te,de,Y,Ze,_e,N,C,at,oe,nt,st,ve,sa,ia,Je,Ae,la,be,da,ca,Z,H,Qs,ye,Lo,ei,Kt,D,B,Fo,ti,oi,ri,ai,sc,Qb,ey,ty,ni,ic,oy,ry,ay,si,lc,ny,sy,iy,it,dc,ly,dy,cc,cy,py,pc,my,hy,uy,ii,mc,fy,gy,_y,li,hc,vy,by,yy,di,uc,wy,Ey,Ty,ci,fc,$y,ky,xy,pi,gc,Ay,Dy,qy,mi,_c,Py,Sy,Oy,hi,vc,Cy,Iy,Og,Ro,Cg,Wo,Uy,ui,Ny,zy,Ig,pa,Ug,lt,Ly,fi,Fy,Ry,gi,Wy,Gy,Ng,Qt,Go,bc,ma,jy,yc,My,zg,b,ha,Vy,wc,Hy,By,Ec,Yy,Zy,we,jo,Tc,Jy,Xy,_i,Ky,Qy,e2,J,$c,t2,o2,kc,r2,a2,xc,n2,s2,Ac,i2,l2,Dc,d2,c2,qc,p2,m2,h2,vi,Pc,u2,f2,g2,De,Sc,_2,v2,Oc,b2,y2,Cc,w2,E2,Ic,T2,$2,k2,qe,Uc,x2,A2,Nc,D2,q2,zc,P2,S2,Lc,O2,C2,I2,Mo,ua,U2,fa,N2,Fc,z2,L2,F2,Vo,ga,R2,_a,W2,Rc,G2,j2,M2,dt,va,V2,Wc,H2,B2,Gc,Y2,Z2,ct,ba,J2,jc,X2,K2,ya,Q2,Mc,ew,tw,ow,pt,wa,rw,Vc,aw,nw,Xe,sw,Hc,iw,lw,Bc,dw,cw,Yc,pw,mw,hw,Ho,Ea,uw,Zc,fw,gw,Pe,Ta,_w,Jc,vw,bw,$a,yw,Xc,ww,Ew,Tw,Kc,$w,kw,mt,ka,xw,eo,Aw,Qc,Dw,qw,ep,Pw,Sw,Ow,tp,Cw,Iw,Bo,xa,Uw,Aa,Nw,bi,zw,Lw,Fw,ht,Da,Rw,qa,Ww,op,Gw,jw,Mw,rp,Vw,Hw,Yo,Pa,Bw,ap,Yw,Zw,ut,Sa,Jw,Oa,Xw,np,Kw,Qw,e0,sp,t0,o0,Se,Ca,r0,Ia,a0,ip,n0,s0,i0,to,l0,lp,d0,c0,dp,p0,m0,h0,cp,u0,f0,ft,Ua,g0,Ee,_0,pp,v0,b0,mp,y0,w0,hp,E0,T0,up,$0,k0,x0,Zo,A0,Jo,Na,D0,za,q0,fp,P0,S0,O0,Xo,La,C0,gp,I0,U0,Ko,Fa,N0,Ra,z0,_p,L0,F0,R0,gt,Wa,W0,Ga,G0,vp,j0,M0,V0,bp,H0,B0,k,ja,Y0,yp,Z0,J0,wp,X0,K0,Ep,Q0,eE,oo,tE,Tp,oE,rE,$p,aE,nE,sE,kp,iE,lE,Ma,dE,xp,Ap,cE,pE,Ke,Te,mE,Dp,hE,uE,qp,fE,gE,Pp,_E,vE,Sp,bE,yE,wE,ro,EE,Op,TE,$E,Cp,kE,xE,AE,yi,Ip,DE,qE,PE,_t,Up,SE,OE,Np,CE,IE,zp,UE,NE,zE,Lp,LE,FE,Fp,RE,WE,Va,GE,Rp,jE,ME,VE,Qe,HE,Wp,BE,YE,Gp,ZE,JE,jp,XE,KE,QE,re,e4,wi,t4,o4,Mp,r4,a4,Vp,n4,s4,Hp,i4,l4,Bp,d4,c4,p4,I,m4,Yp,h4,u4,Zp,f4,g4,Jp,_4,v4,Xp,b4,y4,Ha,w4,E4,Kp,T4,$4,Qp,k4,x4,em,A4,D4,tm,q4,P4,om,S4,O4,C4,$e,I4,Ei,U4,N4,rm,z4,L4,Ti,F4,R4,am,W4,G4,j4,nm,M4,V4,Qo,Ba,H4,sm,B4,Y4,vt,Ya,Z4,Za,J4,im,X4,K4,Q4,$i,eT,lm,tT,oT,bt,Ja,rT,Xa,aT,dm,nT,sT,iT,Ka,lT,cm,dT,cT,pT,X,Qa,mT,pm,hT,uT,en,fT,mm,gT,_T,vT,er,bT,tn,yT,hm,wT,ET,TT,ao,no,$T,um,kT,xT,fm,AT,DT,qT,so,PT,gm,ST,OT,_m,CT,IT,UT,io,NT,vm,zT,LT,bm,FT,RT,WT,yt,on,GT,lo,jT,ym,MT,VT,wm,HT,BT,YT,E
m,ZT,JT,wt,rn,XT,co,KT,Tm,QT,e3,$m,t3,o3,r3,km,a3,n3,tr,an,s3,et,i3,xm,l3,d3,Am,c3,p3,Dm,m3,h3,u3,or,nn,f3,sn,g3,qm,_3,v3,b3,Oe,ln,y3,dn,w3,Pm,E3,T3,$3,Sm,k3,x3,cn,A3,Om,D3,q3,P3,Et,pn,S3,mn,O3,Cm,C3,I3,U3,Im,N3,z3,Tt,hn,L3,Um,F3,R3,Nm,W3,G3,rr,un,j3,zm,M3,V3,$t,fn,H3,Lm,B3,Y3,Fm,Z3,Lg,po,ar,Rm,gn,J3,Wm,X3,Fg,tt,_n,K3,Ce,vn,Q3,Gm,e6,t6,bn,o6,jm,r6,a6,n6,Mm,s6,i6,K,yn,l6,Vm,d6,c6,wn,p6,Hm,m6,h6,u6,nr,f6,En,g6,Bm,_6,v6,b6,mo,ho,y6,Ym,w6,E6,Zm,T6,$6,k6,uo,x6,Jm,A6,D6,Xm,q6,P6,S6,fo,O6,Km,C6,I6,Qm,U6,N6,Rg,go,sr,eh,Tn,z6,th,L6,Wg,z,$n,F6,kn,R6,oh,W6,G6,j6,_o,M6,ki,V6,H6,xn,B6,Y6,Z6,ce,An,J6,rh,X6,K6,vo,Q6,ah,e$,t$,nh,o$,r$,a$,bo,n$,sh,s$,i$,ih,l$,d$,c$,Dn,p$,lh,m$,h$,u$,ir,qn,f$,dh,g$,_$,kt,Pn,v$,ch,b$,y$,yo,w$,ph,E$,T$,mh,$$,k$,x$,lr,Sn,A$,On,D$,hh,q$,P$,S$,dr,Cn,O$,uh,C$,I$,cr,In,U$,fh,N$,Gg,wo,pr,gh,Un,z$,_h,L$,jg,ae,Nn,F$,zn,R$,vh,W$,G$,j$,Eo,M$,xi,V$,H$,Ln,B$,Y$,Z$,ne,J$,bh,X$,K$,yh,Q$,ek,wh,tk,ok,Eh,rk,ak,Th,nk,sk,ik,q,lk,$h,dk,ck,kh,pk,mk,xh,hk,uk,Ah,fk,gk,Dh,_k,vk,qh,bk,yk,Ph,wk,Ek,Sh,Tk,$k,Oh,kk,xk,Ch,Ak,Dk,Ih,qk,Pk,Uh,Sk,Ok,Nh,Ck,Ik,Mg,To,mr,zh,Fn,Uk,Lh,Nk,Vg,pe,zk,Ai,Lk,Fk,Fh,Rk,Wk,Di,Gk,jk,Rh,Mk,Vk,Hg,hr,Hk,qi,Bk,Yk,Bg,ur,Pi,Wh,Zk,Jk,Xk,Si,Gh,Kk,Qk,Yg,Ie,e5,jh,t5,o5,Mh,r5,a5,Oi,n5,s5,Zg,fr,gr,Vh,i5,l5,Hh,d5,c5,p5,Ci,Bh,m5,h5,Jg,$o,_r,Yh,Rn,u5,Zh,f5,Xg,Ue,g5,Ii,_5,v5,Jh,b5,y5,Xh,w5,E5,Kg,xt,T5,Kh,$5,k5,Ui,x5,A5,Qg,vr,Ni,Qh,D5,q5,P5,zi,eu,S5,O5,e_,Ne,C5,Li,I5,U5,tu,N5,z5,ou,L5,F5,t_,Q,R5,Fi,W5,G5,ru,j5,M5,au,V5,H5,nu,B5,Y5,Ri,Z5,J5,o_,Wi,X5,r_,Wn,a_,Gi,K5,n_,Gn,s_,ji,Q5,i_,jn,l_,Mi,ex,d_,Vi,tx,c_,Mn,p_,br,ox,su,rx,ax,m_,ko,yr,iu,Vn,nx,lu,sx,h_,me,ix,Hi,lx,dx,du,cx,px,cu,mx,hx,pu,ux,fx,u_,At,gx,Hn,_x,vx,mu,bx,yx,f_,xo,wr,hu,Bn,wx,uu,Ex,g_,Bi,Tx,__,Er,$x,Yn,fu,kx,xx,v_,Zn,b_,Dt,Ax,Jn,gu,Dx,qx,Xn,_u,Px,Sx,y_,Kn,w_,Qn,E_,Tr,Ox,Yi,Cx,Ix,T_,Zi,Ux,$_,Ji,Nx,k_,Xi,vu,bu,zx,x_,$r,Lx,yu,Fx,Rx,A_,Ki,Wx,D_,es,q_,qt,Gx,wu,jx,Mx,Eu,Vx,Hx,P_,Qi,Bx,S_,ts,O_,Pt,Yx,Tu,Zx,Jx,$u,Xx,Kx,C_,St,Qx,ku,eA,tA,os,xu,oA,rA,I_,rs,U_,el,aA,N_,as,z_,tl,nA,L_,ns,F_,ol,sA,R_,rl,Au,Du,iA,W_,kr,l
A,qu,dA,cA,G_,al,ss,pA,Pu,mA,hA,j_,is,M_,ls,Su,uA,V_,ds,H_,Ot,fA,Ou,gA,_A,Cu,vA,bA,B_,nl,yA,Y_,cs,Z_,sl,wA,J_,xr,EA,Iu,TA,$A,X_,Ao,Ar,Uu,ps,kA,Nu,xA,K_,Dr,AA,il,DA,qA,Q_,ze,PA,ms,SA,OA,hs,CA,IA,us,UA,NA,ev,ll,zA,tv,dl,ov,Do,qr,zu,fs,LA,Lu,FA,rv,cl,RA,av,Ct,WA,gs,GA,jA,_s,MA,VA,nv,pl,HA,sv,vs,iv,ml,BA,lv,It,YA,Fu,ZA,JA,Ru,XA,KA,dv,qo,Pr,Wu,bs,QA,Gu,e9,cv,hl,t9,pv,Le,o9,ju,r9,a9,Mu,n9,s9,Vu,i9,l9,mv,Ut,d9,Hu,c9,p9,Bu,m9,h9,hv,ys,uv,Sr,u9,ws,f9,g9,fv,Po,Or,Yu,Es,_9,Zu,v9,gv,ul,b9,_v,Ts,vv,Nt,y9,Ju,w9,E9,Xu,T9,$9,bv,fl,k9,yv,$s,wv,gl,x9,Ev,Cr,A9,Ku,D9,q9,Tv,ot,Qu,P9,S9,ef,O9,C9,tf,I9,U9,$v,_l,N9,kv,ks,xv,vl,z9,Av,Fe,L9,of,F9,R9,rf,W9,G9,af,j9,M9,Dv,So,Ir,nf,xs,V9,sf,H9,qv,zt,B9,lf,Y9,Z9,df,J9,X9,Pv,bl,K9,Sv,yl,Q9,Ov,Ur,e8,cf,t8,o8,Cv,As,Iv,j,r8,pf,a8,n8,mf,s8,i8,hf,l8,d8,uf,c8,p8,ff,m8,h8,gf,u8,f8,Uv,wl,g8,Nv,Oo,Nr,_f,Ds,_8,vf,v8,zv,Re,b8,qs,y8,w8,El,E8,T8,Ps,$8,k8,Lv,We,bf,x8,A8,yf,D8,q8,wf,P8,S8,Ef,O8,Fv,Tl,C8,Rv,Ss,Tf,I8,U8,Wv,$l,N8,Gv,Os,jv,Lt,z8,$f,L8,F8,kf,R8,W8,Mv,Cs,Vv,Ft,G8,xf,j8,M8,Is,V8,H8,Hv,zr,B8,kl,Y8,Z8,Bv,xl,J8,Yv,Us,Af,X8,K8,Zv,Ns,Jv,Al,Q8,Xv,zs,Kv,Ls,Df,eD,tD,Qv,Fs,e1,Dl,oD,t1,Rs,o1,ql,rD,r1,Lr,aD,Ws,nD,sD,a1,Gs,qf,iD,lD,n1,Rt,dD,Pf,cD,pD,Sf,mD,hD,s1,Fr,uD,Of,fD,gD,i1,js,l1,Pl,_D,d1,Ge,Cf,vD,bD,If,yD,wD,Ms,ED,Uf,TD,$D,kD,Vs,xD,Nf,AD,DD,c1,Hs,rt,qD,zf,PD,SD,Lf,OD,CD,Ff,ID,UD,p1,Rr,ND,Rf,zD,LD,m1,Bs,h1,Co,Wf,FD,RD,Gf,WD,GD,u1,Wt,jD,jf,MD,VD,Mf,HD,BD,f1,Sl,YD,g1,he,Vf,ZD,JD,Hf,XD,KD,Ys,QD,Bf,e7,t7,o7,Io,r7,Yf,a7,n7,Zf,s7,i7,l7,Jf,d7,_1,Ol,c7,v1,Wr,Uo,p7,Xf,m7,h7,Kf,u7,f7,g7,ke,_7,Qf,v7,b7,eg,y7,w7,tg,E7,T7,og,$7,k7,b1,Cl,x7,y1,y,A7,Il,D7,rg,q7,Ul,P7,ag,S7,Nl,O7,ng,C7,zl,I7,sg,U7,Ll,N7,ig,z7,Fl,L7,lg,F7,Rl,R7,dg,W7,Wl,G7,cg,j7,Gl,M7,pg,V7,jl,H7,mg,B7,Ml,Y7,hg,Z7,Vl,J7,ug,X7,Hl,K7,fg,Q7,Bl,eq,gg,tq,Yl,oq,_g,rq,Zl,aq,vg,nq,Jl,sq,bg,iq,Xl,lq,yg,dq,Kl,cq,wg,pq,Ql,mq,Eg,hq,ed,uq,Tg,fq,td,gq,$g,_q,od,vq,kg,bq,w1;return R=new se({}),Ro=new Kb({props:{warning:"&lcub;true}",$$slots:{default:[cL]},$$scope:{ctx:Ye}}}),pa=new P({props:{code:`from torch import nn from 
transformers import Trainer class CustomTrainer(Trainer): def compute_loss(self, model, inputs, return_outputs=False): labels = inputs.get("labels") # forward pass outputs = model(**inputs) logits = outputs.get("logits") # compute custom loss (suppose one has 3 labels with different weights) loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0, 3.0])) loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1)) return (loss, outputs) if return_outputs else loss`,highlighted:`<span class="hljs-keyword">from</span> torch <span class="hljs-keyword">import</span> nn <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Trainer <span class="hljs-keyword">class</span> <span class="hljs-title class_">CustomTrainer</span>(<span class="hljs-title class_ inherited__">Trainer</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">compute_loss</span>(<span class="hljs-params">self, model, inputs, return_outputs=<span class="hljs-literal">False</span></span>): labels = inputs.get(<span class="hljs-string">&quot;labels&quot;</span>) <span class="hljs-comment"># forward pass</span> outputs = model(**inputs) logits = outputs.get(<span class="hljs-string">&quot;logits&quot;</span>) <span class="hljs-comment"># compute custom loss (suppose one has 3 labels with different weights)</span> loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([<span class="hljs-number">1.0</span>, <span class="hljs-number">2.0</span>, <span class="hljs-number">3.0</span>])) loss = loss_fct(logits.view(-<span class="hljs-number">1</span>, self.model.config.num_labels), labels.view(-<span class="hljs-number">1</span>)) <span class="hljs-keyword">return</span> (loss, outputs) <span class="hljs-keyword">if</span> return_outputs <span class="hljs-keyword">else</span> loss`}}),ma=new se({}),ha=new $({props:{name:"class transformers.Trainer",anchor:"transformers.Trainer",parameters:[{name:"model",val:": 
typing.Union[transformers.modeling_utils.PreTrainedModel, torch.nn.modules.module.Module] = None"},{name:"args",val:": TrainingArguments = None"},{name:"data_collator",val:": typing.Optional[DataCollator] = None"},{name:"train_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"eval_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"tokenizer",val:": typing.Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None"},{name:"model_init",val:": typing.Callable[[], transformers.modeling_utils.PreTrainedModel] = None"},{name:"compute_metrics",val:": typing.Union[typing.Callable[[transformers.trainer_utils.EvalPrediction], typing.Dict], NoneType] = None"},{name:"callbacks",val:": typing.Optional[typing.List[transformers.trainer_callback.TrainerCallback]] = None"},{name:"optimizers",val:": typing.Tuple[torch.optim.optimizer.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None)"},{name:"preprocess_logits_for_metrics",val:": typing.Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L199",parametersDescription:[{anchor:"transformers.Trainer.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <code>torch.nn.Module</code>, <em>optional</em>) &#x2014; The model to train, evaluate or use for predictions. 
If not provided, a <code>model_init</code> must be passed.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p><a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> is optimized to work with the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> provided by the library. You can still use your own models defined as <code>torch.nn.Module</code> as long as they work the same way as the &#x1F917; Transformers models.</p> </div>`,name:"model"},{anchor:"transformers.Trainer.args",description:`<strong>args</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>, <em>optional</em>) &#x2014; The arguments to tweak for training. Will default to a basic instance of <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> with the <code>output_dir</code> set to a directory named <em>tmp_trainer</em> in the current directory if not provided.`,name:"args"},{anchor:"transformers.Trainer.data_collator",description:`<strong>data_collator</strong> (<code>DataCollator</code>, <em>optional</em>) &#x2014; The function to use to form a batch from a list of elements of <code>train_dataset</code> or <code>eval_dataset</code>. 
Will default to <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.default_data_collator">default_data_collator()</a> if no <code>tokenizer</code> is provided, an instance of <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorWithPadding">DataCollatorWithPadding</a> otherwise.`,name:"data_collator"},{anchor:"transformers.Trainer.train_dataset",description:`<strong>train_dataset</strong> (<code>torch.utils.data.Dataset</code> or <code>torch.utils.data.IterableDataset</code>, <em>optional</em>) &#x2014; The dataset to use for training. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed.</p> <p>Note that if it&#x2019;s a <code>torch.utils.data.IterableDataset</code> with some randomization and you are training in a distributed fashion, your iterable dataset should either use a internal attribute <code>generator</code> that is a <code>torch.Generator</code> for the randomization that must be identical on all processes (and the Trainer will manually set the seed of this <code>generator</code> at each epoch) or have a <code>set_epoch()</code> method that internally sets the seed of the RNGs used.`,name:"train_dataset"},{anchor:"transformers.Trainer.eval_dataset",description:`<strong>eval_dataset</strong> (<code>torch.utils.data.Dataset</code>, <em>optional</em>) &#x2014; The dataset to use for evaluation. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed.`,name:"eval_dataset"},{anchor:"transformers.Trainer.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a>, <em>optional</em>) &#x2014; The tokenizer used to preprocess the data. 
If provided, will be used to automatically pad the inputs the maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine-tuned model.`,name:"tokenizer"},{anchor:"transformers.Trainer.model_init",description:`<strong>model_init</strong> (<code>Callable[[], PreTrainedModel]</code>, <em>optional</em>) &#x2014; A function that instantiates the model to be used. If provided, each call to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> will start from a new instance of the model as given by this function.</p> <p>The function may have zero argument, or a single one containing the optuna/Ray Tune/SigOpt trial object, to be able to choose different architectures according to hyper parameters (such as layer count, sizes of inner layers, dropout probabilities etc).`,name:"model_init"},{anchor:"transformers.Trainer.compute_metrics",description:`<strong>compute_metrics</strong> (<code>Callable[[EvalPrediction], Dict]</code>, <em>optional</em>) &#x2014; The function that will be used to compute metrics at evaluation. Must take a <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.EvalPrediction">EvalPrediction</a> and return a dictionary string to metric values.`,name:"compute_metrics"},{anchor:"transformers.Trainer.callbacks",description:`<strong>callbacks</strong> (List of <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a>, <em>optional</em>) &#x2014; A list of callbacks to customize the training loop. 
Will add those to the list of default callbacks detailed in <a href="callback">here</a>.</p> <p>If you want to remove one of the default callbacks used, use the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.remove_callback">Trainer.remove_callback()</a> method.`,name:"callbacks"},{anchor:"transformers.Trainer.optimizers",description:`<strong>optimizers</strong> (<code>Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]</code>, <em>optional</em>) &#x2014; A tuple containing the optimizer and the scheduler to use. Will default to an instance of <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> on your model and a scheduler given by <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.get_linear_schedule_with_warmup">get_linear_schedule_with_warmup()</a> controlled by <code>args</code>.`,name:"optimizers"},{anchor:"transformers.Trainer.preprocess_logits_for_metrics",description:`<strong>preprocess_logits_for_metrics</strong> (<code>Callable[[torch.Tensor, torch.Tensor], torch.Tensor]</code>, <em>optional</em>) &#x2014; A function that preprocess the logits right before caching them at each evaluation step. Must take two tensors, the logits and the labels, and return the logits once processed as desired. 
The modifications made by this function will be reflected in the predictions received by <code>compute_metrics</code>.</p> <p>Note that the labels (second parameter) will be <code>None</code> if the dataset does not have them.`,name:"preprocess_logits_for_metrics"}]}}),ua=new $({props:{name:"add_callback",anchor:"transformers.Trainer.add_callback",parameters:[{name:"callback",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L513",parametersDescription:[{anchor:"transformers.Trainer.add_callback.callback",description:`<strong>callback</strong> (<code>type</code> or <code>TrainerCallback</code>) &#x2014; A <code>TrainerCallback</code> class or an instance of a <code>TrainerCallback</code>. In the first case, will instantiate a member of that class.`,name:"callback"}]}}),ga=new $({props:{name:"autocast_smart_context_manager",anchor:"transformers.Trainer.autocast_smart_context_manager",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L1956"}}),va=new $({props:{name:"compute_loss",anchor:"transformers.Trainer.compute_loss",parameters:[{name:"model",val:""},{name:"inputs",val:""},{name:"return_outputs",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2020"}}),ba=new $({props:{name:"create_optimizer",anchor:"transformers.Trainer.create_optimizer",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L836"}}),wa=new $({props:{name:"create_optimizer_and_scheduler",anchor:"transformers.Trainer.create_optimizer_and_scheduler",parameters:[{name:"num_training_steps",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L825"}}),Ea=new $({props:{name:"create_scheduler",anchor:"transformers.Trainer.create_scheduler",parameters:[{name:"num_training_steps",val:": int"},{name:"optimizer",val:": Optimizer 
= None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L921",parametersDescription:[{anchor:"transformers.Trainer.create_scheduler.num_training_steps",description:"<strong>num_training_steps</strong> (int) &#x2014; The number of training steps to do.",name:"num_training_steps"}]}}),Ta=new $({props:{name:"evaluate",anchor:"transformers.Trainer.evaluate",parameters:[{name:"eval_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 'eval'"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2234",parametersDescription:[{anchor:"transformers.Trainer.evaluate.eval_dataset",description:`<strong>eval_dataset</strong> (<code>Dataset</code>, <em>optional</em>) &#x2014; Pass a dataset if you wish to override <code>self.eval_dataset</code>. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. It must implement the <code>__len__</code> method.`,name:"eval_dataset"},{anchor:"transformers.Trainer.evaluate.ignore_keys",description:`<strong>ignore_keys</strong> (<code>Lst[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.`,name:"ignore_keys"},{anchor:"transformers.Trainer.evaluate.metric_key_prefix",description:`<strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;eval&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;eval_bleu&#x201D; if the prefix is &#x201C;eval&#x201D; (default)`,name:"metric_key_prefix"}],returnDescription:` <p>A dictionary containing the evaluation loss and the potential metrics computed from the predictions. 
The dictionary also contains the epoch number which comes from the training state.</p> `}}),ka=new $({props:{name:"evaluation_loop",anchor:"transformers.Trainer.evaluation_loop",parameters:[{name:"dataloader",val:": DataLoader"},{name:"description",val:": str"},{name:"prediction_loss_only",val:": typing.Optional[bool] = None"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 'eval'"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2362"}}),xa=new $({props:{name:"floating_point_ops",anchor:"transformers.Trainer.floating_point_ops",parameters:[{name:"inputs",val:": typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2683",parametersDescription:[{anchor:"transformers.Trainer.floating_point_ops.inputs",description:`<strong>inputs</strong> (<code>Dict[str, Union[torch.Tensor, Any]]</code>) &#x2014; The inputs and targets of the model.`,name:"inputs"}],returnDescription:` <p>The number of floating-point operations.</p> `,returnType:` <p><code>int</code></p> `}}),Da=new $({props:{name:"get_eval_dataloader",anchor:"transformers.Trainer.get_eval_dataloader",parameters:[{name:"eval_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L735",parametersDescription:[{anchor:"transformers.Trainer.get_eval_dataloader.eval_dataset",description:`<strong>eval_dataset</strong> (<code>torch.utils.data.Dataset</code>, <em>optional</em>) &#x2014; If provided, will override <code>self.eval_dataset</code>. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. 
It must implement <code>__len__</code>.`,name:"eval_dataset"}]}}),Pa=new $({props:{name:"get_optimizer_cls_and_kwargs",anchor:"transformers.Trainer.get_optimizer_cls_and_kwargs",parameters:[{name:"args",val:": TrainingArguments"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L873",parametersDescription:[{anchor:"transformers.Trainer.get_optimizer_cls_and_kwargs.args",description:`<strong>args</strong> (<code>transformers.training_args.TrainingArguments</code>) &#x2014; The training arguments for the training session.`,name:"args"}]}}),Sa=new $({props:{name:"get_test_dataloader",anchor:"transformers.Trainer.get_test_dataloader",parameters:[{name:"test_dataset",val:": Dataset"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L782",parametersDescription:[{anchor:"transformers.Trainer.get_test_dataloader.test_dataset",description:`<strong>test_dataset</strong> (<code>torch.utils.data.Dataset</code>, <em>optional</em>) &#x2014; The test dataset to use. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. 
It must implement <code>__len__</code>.`,name:"test_dataset"}]}}),Ca=new $({props:{name:"get_train_dataloader",anchor:"transformers.Trainer.get_train_dataloader",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L660"}}),Ua=new $({props:{name:"hyperparameter_search",anchor:"transformers.Trainer.hyperparameter_search",parameters:[{name:"hp_space",val:": typing.Union[typing.Callable[[ForwardRef('optuna.Trial')], typing.Dict[str, float]], NoneType] = None"},{name:"compute_objective",val:": typing.Union[typing.Callable[[typing.Dict[str, float]], float], NoneType] = None"},{name:"n_trials",val:": int = 20"},{name:"direction",val:": str = 'minimize'"},{name:"backend",val:": typing.Union[ForwardRef('str'), transformers.trainer_utils.HPSearchBackend, NoneType] = None"},{name:"hp_name",val:": typing.Union[typing.Callable[[ForwardRef('optuna.Trial')], str], NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L1812",parametersDescription:[{anchor:"transformers.Trainer.hyperparameter_search.hp_space",description:`<strong>hp_space</strong> (<code>Callable[[&quot;optuna.Trial&quot;], Dict[str, float]]</code>, <em>optional</em>) &#x2014; A function that defines the hyperparameter search space. Will default to <code>default_hp_space_optuna()</code>or <code>default_hp_space_ray()</code>or <code>default_hp_space_sigopt()</code>depending on your backend.`,name:"hp_space"},{anchor:"transformers.Trainer.hyperparameter_search.compute_objective",description:`<strong>compute_objective</strong> (<code>Callable[[Dict[str, float]], float]</code>, <em>optional</em>) &#x2014; A function computing the objective to minimize or maximize from the metrics returned by the <code>evaluate</code> method. 
Will default to <code>default_compute_objective()</code>`,name:"compute_objective"},{anchor:"transformers.Trainer.hyperparameter_search.n_trials",description:`<strong>n_trials</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; The number of trial runs to test.`,name:"n_trials"},{anchor:"transformers.Trainer.hyperparameter_search.direction(str,",description:`<strong>direction(<code>str</code>,</strong> <em>optional</em>, defaults to <code>&quot;minimize&quot;</code>) &#x2014; Whether to optimize greater or lower objects. Can be <code>&quot;minimize&quot;</code> or <code>&quot;maximize&quot;</code>, you should pick <code>&quot;minimize&quot;</code> when optimizing the validation loss, <code>&quot;maximize&quot;</code> when optimizing one or several metrics.`,name:"direction(str,"},{anchor:"transformers.Trainer.hyperparameter_search.backend(str",description:`<strong>backend(<code>str</code></strong> or <code>HPSearchBackend</code>, <em>optional</em>) &#x2014; The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending on which one is installed. If all are installed, will default to optuna. kwargs &#x2014; Additional keyword arguments passed along to <code>optuna.create_study</code> or <code>ray.tune.run</code>. 
For more information see:</p> <ul> <li>the documentation of <a href="https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html" rel="nofollow">optuna.create_study</a></li> <li>the documentation of <a href="https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run" rel="nofollow">tune.run</a></li> <li>the documentation of <a href="https://app.sigopt.com/docs/endpoints/experiments/create" rel="nofollow">sigopt</a></li> </ul>`,name:"backend(str"}],returnDescription:` <p>All the information about the best run.</p> `}}),Zo=new Kb({props:{warning:"&lcub;true}",$$slots:{default:[pL]},$$scope:{ctx:Ye}}}),Na=new $({props:{name:"init_git_repo",anchor:"transformers.Trainer.init_git_repo",parameters:[{name:"at_init",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2701",parametersDescription:[{anchor:"transformers.Trainer.init_git_repo.at_init",description:`<strong>at_init</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether this function is called before any training or not. 
If <code>self.args.overwrite_output_dir</code> is <code>True</code> and <code>at_init</code> is <code>True</code>, the path to the repo (which is <code>self.args.output_dir</code>) might be wiped out.`,name:"at_init"}]}}),La=new $({props:{name:"is_local_process_zero",anchor:"transformers.Trainer.is_local_process_zero",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2044"}}),Fa=new $({props:{name:"is_world_process_zero",anchor:"transformers.Trainer.is_world_process_zero",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2051"}}),Wa=new $({props:{name:"log",anchor:"transformers.Trainer.log",parameters:[{name:"logs",val:": typing.Dict[str, float]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L1905",parametersDescription:[{anchor:"transformers.Trainer.log.logs",description:`<strong>logs</strong> (<code>Dict[str, float]</code>) &#x2014; The values to log.`,name:"logs"}]}}),ja=new $({props:{name:"log_metrics",anchor:"transformers.trainer_pt_utils.log_metrics",parameters:[{name:"split",val:""},{name:"metrics",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L855",parametersDescription:[{anchor:"transformers.trainer_pt_utils.log_metrics.split",description:`<strong>split</strong> (<code>str</code>) &#x2014; Mode/split name: one of <code>train</code>, <code>eval</code>, <code>test</code>`,name:"split"},{anchor:"transformers.trainer_pt_utils.log_metrics.metrics",description:`<strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predictmetrics: metrics dict`,name:"metrics"}]}}),Ma=new P({props:{code:`init_mem_cpu_alloc_delta = 1301MB init_mem_cpu_peaked_delta = 154MB init_mem_gpu_alloc_delta = 230MB init_mem_gpu_peaked_delta = 0MB train_mem_cpu_alloc_delta = 1345MB train_mem_cpu_peaked_delta = 
0MB train_mem_gpu_alloc_delta = 693MB train_mem_gpu_peaked_delta = 7MB`,highlighted:`<span class="hljs-attr">init_mem_cpu_alloc_delta</span> = <span class="hljs-number">1301</span>MB <span class="hljs-attr">init_mem_cpu_peaked_delta</span> = <span class="hljs-number">154</span>MB <span class="hljs-attr">init_mem_gpu_alloc_delta</span> = <span class="hljs-number">230</span>MB <span class="hljs-attr">init_mem_gpu_peaked_delta</span> = <span class="hljs-number">0</span>MB <span class="hljs-attr">train_mem_cpu_alloc_delta</span> = <span class="hljs-number">1345</span>MB <span class="hljs-attr">train_mem_cpu_peaked_delta</span> = <span class="hljs-number">0</span>MB <span class="hljs-attr">train_mem_gpu_alloc_delta</span> = <span class="hljs-number">693</span>MB <span class="hljs-attr">train_mem_gpu_peaked_delta</span> = <span class="hljs-number">7</span>MB`}}),Ba=new $({props:{name:"metrics_format",anchor:"transformers.trainer_pt_utils.metrics_format",parameters:[{name:"metrics",val:": typing.Dict[str, float]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L829",parametersDescription:[{anchor:"transformers.trainer_pt_utils.metrics_format.metrics",description:`<strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predict`,name:"metrics"}],returnDescription:` <p>The reformatted metrics</p> `,returnType:` <p>metrics (<code>Dict[str, float]</code>)</p> `}}),Ya=new $({props:{name:"num_examples",anchor:"transformers.Trainer.num_examples",parameters:[{name:"dataloader",val:": DataLoader"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L938"}}),Ja=new 
$({props:{name:"pop_callback",anchor:"transformers.Trainer.pop_callback",parameters:[{name:"callback",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L524",parametersDescription:[{anchor:"transformers.Trainer.pop_callback.callback",description:`<strong>callback</strong> (<code>type</code> or <code>TrainerCallback</code>) &#x2014; A <code>TrainerCallback</code> class or an instance of a <code>TrainerCallback</code>. In the first case, will pop the first member of that class found in the list of callbacks.`,name:"callback"}],returnDescription:` <p>The callback removed, if found.</p> `,returnType:` <p><code>TrainerCallback</code></p> `}}),Qa=new $({props:{name:"predict",anchor:"transformers.Trainer.predict",parameters:[{name:"test_dataset",val:": Dataset"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 'test'"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2303",parametersDescription:[{anchor:"transformers.Trainer.predict.test_dataset",description:`<strong>test_dataset</strong> (<code>Dataset</code>) &#x2014; Dataset to run the predictions on. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. Has to implement the method <code>__len__</code>`,name:"test_dataset"},{anchor:"transformers.Trainer.predict.ignore_keys",description:`<strong>ignore_keys</strong> (<code>Lst[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.`,name:"ignore_keys"},{anchor:"transformers.Trainer.predict.metric_key_prefix",description:`<strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;test&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. 
For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;test_bleu&#x201D; if the prefix is &#x201C;test&#x201D; (default)`,name:"metric_key_prefix"}]}}),er=new Kb({props:{$$slots:{default:[mL]},$$scope:{ctx:Ye}}}),on=new $({props:{name:"prediction_loop",anchor:"transformers.Trainer.prediction_loop",parameters:[{name:"dataloader",val:": DataLoader"},{name:"description",val:": str"},{name:"prediction_loss_only",val:": typing.Optional[bool] = None"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 'eval'"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2878"}}),rn=new $({props:{name:"prediction_step",anchor:"transformers.Trainer.prediction_step",parameters:[{name:"model",val:": Module"},{name:"inputs",val:": typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]"},{name:"prediction_loss_only",val:": bool"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2586",parametersDescription:[{anchor:"transformers.Trainer.prediction_step.model",description:`<strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to evaluate.`,name:"model"},{anchor:"transformers.Trainer.prediction_step.inputs",description:`<strong>inputs</strong> (<code>Dict[str, Union[torch.Tensor, Any]]</code>) &#x2014; The inputs and targets of the model.</p> <p>The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument <code>labels</code>. 
Check your model&#x2019;s documentation for all accepted arguments.`,name:"inputs"},{anchor:"transformers.Trainer.prediction_step.prediction_loss_only",description:`<strong>prediction_loss_only</strong> (<code>bool</code>) &#x2014; Whether or not to return the loss only.`,name:"prediction_loss_only"},{anchor:"transformers.Trainer.prediction_step.ignore_keys",description:`<strong>ignore_keys</strong> (<code>Lst[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.`,name:"ignore_keys"}],returnDescription:` <p>A tuple with the loss, logits and labels (each being optional).</p> `,returnType:` <p>Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]</p> `}}),an=new $({props:{name:"push_to_hub",anchor:"transformers.Trainer.push_to_hub",parameters:[{name:"commit_message",val:": typing.Optional[str] = 'End of training'"},{name:"blocking",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2824",parametersDescription:[{anchor:"transformers.Trainer.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;End of training&quot;</code>) &#x2014; Message to commit while pushing.`,name:"commit_message"},{anchor:"transformers.Trainer.push_to_hub.blocking",description:`<strong>blocking</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether the function should return only when the <code>git push</code> has finished. 
kwargs &#x2014; Additional keyword arguments passed along to <code>create_model_card()</code>`,name:"blocking"}],returnDescription:` <p>The url of the commit of your model in the given repository if <code>blocking=False</code>, a tuple with the url of the commit and an object to track the progress of the commit if <code>blocking=True</code></p> `}}),nn=new $({props:{name:"remove_callback",anchor:"transformers.Trainer.remove_callback",parameters:[{name:"callback",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L540",parametersDescription:[{anchor:"transformers.Trainer.remove_callback.callback",description:`<strong>callback</strong> (<code>type</code> or <code>TrainerCallback</code>) &#x2014; A <code>TrainerCallback</code> class or an instance of a <code>TrainerCallback</code>. In the first case, will remove the first member of that class found in the list of callbacks.`,name:"callback"}]}}),ln=new $({props:{name:"save_metrics",anchor:"transformers.trainer_pt_utils.save_metrics",parameters:[{name:"split",val:""},{name:"metrics",val:""},{name:"combined",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L945",parametersDescription:[{anchor:"transformers.trainer_pt_utils.save_metrics.split",description:`<strong>split</strong> (<code>str</code>) &#x2014; Mode/split name: one of <code>train</code>, <code>eval</code>, <code>test</code>, <code>all</code>`,name:"split"},{anchor:"transformers.trainer_pt_utils.save_metrics.metrics",description:`<strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predict`,name:"metrics"},{anchor:"transformers.trainer_pt_utils.save_metrics.combined",description:`<strong>combined</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Creates combined metrics by updating <code>all_results.json</code> with metrics of this 
call`,name:"combined"}]}}),pn=new $({props:{name:"save_model",anchor:"transformers.Trainer.save_model",parameters:[{name:"output_dir",val:": typing.Optional[str] = None"},{name:"_internal_call",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2063"}}),hn=new $({props:{name:"save_state",anchor:"transformers.trainer_pt_utils.save_state",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L983"}}),un=new $({props:{name:"train",anchor:"transformers.Trainer.train",parameters:[{name:"resume_from_checkpoint",val:": typing.Union[str, bool, NoneType] = None"},{name:"trial",val:": typing.Union[ForwardRef('optuna.Trial'), typing.Dict[str, typing.Any]] = None"},{name:"ignore_keys_for_eval",val:": typing.Optional[typing.List[str]] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L1102",parametersDescription:[{anchor:"transformers.Trainer.train.resume_from_checkpoint",description:`<strong>resume_from_checkpoint</strong> (<code>str</code> or <code>bool</code>, <em>optional</em>) &#x2014; If a <code>str</code>, local path to a saved checkpoint as saved by a previous instance of <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>. If a <code>bool</code> and equals <code>True</code>, load the last checkpoint in <em>args.output_dir</em> as saved by a previous instance of <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>. 
If present, training will resume from the model/optimizer/scheduler states loaded here.`,name:"resume_from_checkpoint"},{anchor:"transformers.Trainer.train.trial",description:`<strong>trial</strong> (<code>optuna.Trial</code> or <code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; The trial run or the hyperparameter dictionary for hyperparameter search.`,name:"trial"},{anchor:"transformers.Trainer.train.ignore_keys_for_eval",description:`<strong>ignore_keys_for_eval</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions for evaluation during the training. kwargs &#x2014; Additional keyword arguments used to hide deprecated arguments`,name:"ignore_keys_for_eval"}]}}),fn=new $({props:{name:"training_step",anchor:"transformers.Trainer.training_step",parameters:[{name:"model",val:": Module"},{name:"inputs",val:": typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L1971",parametersDescription:[{anchor:"transformers.Trainer.training_step.model",description:`<strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to train.`,name:"model"},{anchor:"transformers.Trainer.training_step.inputs",description:`<strong>inputs</strong> (<code>Dict[str, Union[torch.Tensor, Any]]</code>) &#x2014; The inputs and targets of the model.</p> <p>The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument <code>labels</code>. 
Check your model&#x2019;s documentation for all accepted arguments.`,name:"inputs"}],returnDescription:` <p>The tensor with training loss on this batch.</p> `,returnType:` <p><code>torch.Tensor</code></p> `}}),gn=new se({}),_n=new $({props:{name:"class transformers.Seq2SeqTrainer",anchor:"transformers.Seq2SeqTrainer",parameters:[{name:"model",val:": typing.Union[transformers.modeling_utils.PreTrainedModel, torch.nn.modules.module.Module] = None"},{name:"args",val:": TrainingArguments = None"},{name:"data_collator",val:": typing.Optional[DataCollator] = None"},{name:"train_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"eval_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"tokenizer",val:": typing.Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None"},{name:"model_init",val:": typing.Callable[[], transformers.modeling_utils.PreTrainedModel] = None"},{name:"compute_metrics",val:": typing.Union[typing.Callable[[transformers.trainer_utils.EvalPrediction], typing.Dict], NoneType] = None"},{name:"callbacks",val:": typing.Optional[typing.List[transformers.trainer_callback.TrainerCallback]] = None"},{name:"optimizers",val:": typing.Tuple[torch.optim.optimizer.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None)"},{name:"preprocess_logits_for_metrics",val:": typing.Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_seq2seq.py#L30"}}),vn=new $({props:{name:"evaluate",anchor:"transformers.Seq2SeqTrainer.evaluate",parameters:[{name:"eval_dataset",val:": typing.Optional[torch.utils.data.dataset.Dataset] = None"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 'eval'"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"num_beams",val:": typing.Optional[int] = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_seq2seq.py#L31",parametersDescription:[{anchor:"transformers.Seq2SeqTrainer.evaluate.eval_dataset",description:`<strong>eval_dataset</strong> (<code>Dataset</code>, <em>optional</em>) &#x2014; Pass a dataset if you wish to override <code>self.eval_dataset</code>. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. It must implement the <code>__len__</code> method.`,name:"eval_dataset"},{anchor:"transformers.Seq2SeqTrainer.evaluate.ignore_keys",description:`<strong>ignore_keys</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.`,name:"ignore_keys"},{anchor:"transformers.Seq2SeqTrainer.evaluate.metric_key_prefix",description:`<strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;eval&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;eval_bleu&#x201D; if the prefix is <code>&quot;eval&quot;</code> (default)`,name:"metric_key_prefix"},{anchor:"transformers.Seq2SeqTrainer.evaluate.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum target length to use when predicting with the generate method.`,name:"max_length"},{anchor:"transformers.Seq2SeqTrainer.evaluate.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search.`,name:"num_beams"}],returnDescription:` <p>A dictionary containing the evaluation loss and the potential metrics computed from the predictions. 
The dictionary also contains the epoch number which comes from the training state.</p> `}}),yn=new $({props:{name:"predict",anchor:"transformers.Seq2SeqTrainer.predict",parameters:[{name:"test_dataset",val:": Dataset"},{name:"ignore_keys",val:": typing.Optional[typing.List[str]] = None"},{name:"metric_key_prefix",val:": str = 'test'"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"num_beams",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_seq2seq.py#L72",parametersDescription:[{anchor:"transformers.Seq2SeqTrainer.predict.test_dataset",description:`<strong>test_dataset</strong> (<code>Dataset</code>) &#x2014; Dataset to run the predictions on. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. Has to implement the method <code>__len__</code>`,name:"test_dataset"},{anchor:"transformers.Seq2SeqTrainer.predict.ignore_keys",description:`<strong>ignore_keys</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.`,name:"ignore_keys"},{anchor:"transformers.Seq2SeqTrainer.predict.metric_key_prefix",description:`<strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;eval&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. 
For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;eval_bleu&#x201D; if the prefix is <code>&quot;eval&quot;</code> (default)`,name:"metric_key_prefix"},{anchor:"transformers.Seq2SeqTrainer.predict.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum target length to use when predicting with the generate method.`,name:"max_length"},{anchor:"transformers.Seq2SeqTrainer.predict.num_beams",description:`<strong>num_beams</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search.`,name:"num_beams"}]}}),nr=new Kb({props:{$$slots:{default:[hL]},$$scope:{ctx:Ye}}}),Tn=new se({}),$n=new $({props:{name:"class transformers.TrainingArguments",anchor:"transformers.TrainingArguments",parameters:[{name:"output_dir",val:": str"},{name:"overwrite_output_dir",val:": bool = False"},{name:"do_train",val:": bool = False"},{name:"do_eval",val:": bool = False"},{name:"do_predict",val:": bool = False"},{name:"evaluation_strategy",val:": IntervalStrategy = 'no'"},{name:"prediction_loss_only",val:": bool = False"},{name:"per_device_train_batch_size",val:": int = 8"},{name:"per_device_eval_batch_size",val:": int = 8"},{name:"per_gpu_train_batch_size",val:": typing.Optional[int] = None"},{name:"per_gpu_eval_batch_size",val:": typing.Optional[int] = None"},{name:"gradient_accumulation_steps",val:": int = 1"},{name:"eval_accumulation_steps",val:": typing.Optional[int] = None"},{name:"learning_rate",val:": float = 5e-05"},{name:"weight_decay",val:": float = 0.0"},{name:"adam_beta1",val:": float = 0.9"},{name:"adam_beta2",val:": float = 0.999"},{name:"adam_epsilon",val:": float = 1e-08"},{name:"max_grad_norm",val:": float = 1.0"},{name:"num_train_epochs",val:": float = 3.0"},{name:"max_steps",val:": int = -1"},{name:"lr_scheduler_type",val:": SchedulerType = 'linear'"},{name:"warmup_ratio",val:": float = 
0.0"},{name:"warmup_steps",val:": int = 0"},{name:"log_level",val:": typing.Optional[str] = 'passive'"},{name:"log_level_replica",val:": typing.Optional[str] = 'passive'"},{name:"log_on_each_node",val:": bool = True"},{name:"logging_dir",val:": typing.Optional[str] = None"},{name:"logging_strategy",val:": IntervalStrategy = 'steps'"},{name:"logging_first_step",val:": bool = False"},{name:"logging_steps",val:": int = 500"},{name:"logging_nan_inf_filter",val:": str = True"},{name:"save_strategy",val:": IntervalStrategy = 'steps'"},{name:"save_steps",val:": int = 500"},{name:"save_total_limit",val:": typing.Optional[int] = None"},{name:"save_on_each_node",val:": bool = False"},{name:"no_cuda",val:": bool = False"},{name:"seed",val:": int = 42"},{name:"data_seed",val:": int = None"},{name:"bf16",val:": bool = False"},{name:"fp16",val:": bool = False"},{name:"fp16_opt_level",val:": str = 'O1'"},{name:"half_precision_backend",val:": str = 'auto'"},{name:"bf16_full_eval",val:": bool = False"},{name:"fp16_full_eval",val:": bool = False"},{name:"tf32",val:": bool = None"},{name:"local_rank",val:": int = -1"},{name:"xpu_backend",val:": str = None"},{name:"tpu_num_cores",val:": typing.Optional[int] = None"},{name:"tpu_metrics_debug",val:": bool = False"},{name:"debug",val:": str = ''"},{name:"dataloader_drop_last",val:": bool = False"},{name:"eval_steps",val:": int = None"},{name:"dataloader_num_workers",val:": int = 0"},{name:"past_index",val:": int = -1"},{name:"run_name",val:": typing.Optional[str] = None"},{name:"disable_tqdm",val:": typing.Optional[bool] = None"},{name:"remove_unused_columns",val:": typing.Optional[bool] = True"},{name:"label_names",val:": typing.Optional[typing.List[str]] = None"},{name:"load_best_model_at_end",val:": typing.Optional[bool] = False"},{name:"metric_for_best_model",val:": typing.Optional[str] = None"},{name:"greater_is_better",val:": typing.Optional[bool] = None"},{name:"ignore_data_skip",val:": bool = False"},{name:"sharded_ddp",val:": 
str = ''"},{name:"deepspeed",val:": typing.Optional[str] = None"},{name:"label_smoothing_factor",val:": float = 0.0"},{name:"optim",val:": OptimizerNames = 'adamw_hf'"},{name:"adafactor",val:": bool = False"},{name:"group_by_length",val:": bool = False"},{name:"length_column_name",val:": typing.Optional[str] = 'length'"},{name:"report_to",val:": typing.Optional[typing.List[str]] = None"},{name:"ddp_find_unused_parameters",val:": typing.Optional[bool] = None"},{name:"ddp_bucket_cap_mb",val:": typing.Optional[int] = None"},{name:"dataloader_pin_memory",val:": bool = True"},{name:"skip_memory_metrics",val:": bool = True"},{name:"use_legacy_prediction_loop",val:": bool = False"},{name:"push_to_hub",val:": bool = False"},{name:"resume_from_checkpoint",val:": typing.Optional[str] = None"},{name:"hub_model_id",val:": str = None"},{name:"hub_strategy",val:": HubStrategy = 'every_save'"},{name:"hub_token",val:": str = None"},{name:"gradient_checkpointing",val:": bool = False"},{name:"fp16_backend",val:": str = 'auto'"},{name:"push_to_hub_model_id",val:": str = None"},{name:"push_to_hub_organization",val:": str = None"},{name:"push_to_hub_token",val:": str = None"},{name:"mp_parameters",val:": str = ''"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L86",parametersDescription:[{anchor:"transformers.TrainingArguments.output_dir",description:`<strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written.`,name:"output_dir"},{anchor:"transformers.TrainingArguments.overwrite_output_dir",description:`<strong>overwrite_output_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, overwrite the content of the output directory. 
Use this to continue training if <code>output_dir</code> points to a checkpoint directory.`,name:"overwrite_output_dir"},{anchor:"transformers.TrainingArguments.do_train",description:`<strong>do_train</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run training or not. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_train"},{anchor:"transformers.TrainingArguments.do_eval",description:`<strong>do_eval</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to run evaluation on the validation set or not. Will be set to <code>True</code> if <code>evaluation_strategy</code> is different from <code>&quot;no&quot;</code>. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_eval"},{anchor:"transformers.TrainingArguments.do_predict",description:`<strong>do_predict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run predictions on the test set or not. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. 
See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_predict"},{anchor:"transformers.TrainingArguments.evaluation_strategy",description:`<strong>evaluation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;no&quot;</code>) &#x2014; The evaluation strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No evaluation is done during training.</li> <li><code>&quot;steps&quot;</code>: Evaluation is done (and logged) every <code>eval_steps</code>.</li> <li><code>&quot;epoch&quot;</code>: Evaluation is done at the end of each epoch.</li> </ul>`,name:"evaluation_strategy"},{anchor:"transformers.TrainingArguments.prediction_loss_only",description:`<strong>prediction_loss_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When performing evaluation and generating predictions, only returns the loss.`,name:"prediction_loss_only"},{anchor:"transformers.TrainingArguments.per_device_train_batch_size",description:`<strong>per_device_train_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for training.`,name:"per_device_train_batch_size"},{anchor:"transformers.TrainingArguments.per_device_eval_batch_size",description:`<strong>per_device_eval_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for evaluation.`,name:"per_device_eval_batch_size"},{anchor:"transformers.TrainingArguments.gradient_accumulation_steps",description:`<strong>gradient_accumulation_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of updates steps to accumulate the gradients for, before performing a backward/update pass.</p> 
<div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every <code>gradient_accumulation_steps * xxx_step</code> training examples.</p> </div>`,name:"gradient_accumulation_steps"},{anchor:"transformers.TrainingArguments.eval_accumulation_steps",description:`<strong>eval_accumulation_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but requires more memory).`,name:"eval_accumulation_steps"},{anchor:"transformers.TrainingArguments.learning_rate",description:`<strong>learning_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 5e-5) &#x2014; The initial learning rate for <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"learning_rate"},{anchor:"transformers.TrainingArguments.weight_decay",description:`<strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"weight_decay"},{anchor:"transformers.TrainingArguments.adam_beta1",description:`<strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 hyperparameter for the <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> 
optimizer.`,name:"adam_beta1"},{anchor:"transformers.TrainingArguments.adam_beta2",description:`<strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 hyperparameter for the <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"adam_beta2"},{anchor:"transformers.TrainingArguments.adam_epsilon",description:`<strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon hyperparameter for the <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"adam_epsilon"},{anchor:"transformers.TrainingArguments.max_grad_norm",description:`<strong>max_grad_norm</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Maximum gradient norm (for gradient clipping).`,name:"max_grad_norm"},{anchor:"transformers.TrainingArguments.num_train_epochs(float,",description:`<strong>num_train_epochs(<code>float</code>,</strong> <em>optional</em>, defaults to 3.0) &#x2014; Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training).`,name:"num_train_epochs(float,"},{anchor:"transformers.TrainingArguments.max_steps",description:`<strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; If set to a positive number, the total number of training steps to perform. Overrides <code>num_train_epochs</code>. 
In case of using a finite iterable dataset the training may stop before reaching the set number of steps when all data is exhausted`,name:"max_steps"},{anchor:"transformers.TrainingArguments.lr_scheduler_type",description:`<strong>lr_scheduler_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a>, <em>optional</em>, defaults to <code>&quot;linear&quot;</code>) &#x2014; The scheduler type to use. See the documentation of <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a> for all possible values.`,name:"lr_scheduler_type"},{anchor:"transformers.TrainingArguments.warmup_ratio",description:`<strong>warmup_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Ratio of total training steps used for a linear warmup from 0 to <code>learning_rate</code>.`,name:"warmup_ratio"},{anchor:"transformers.TrainingArguments.warmup_steps",description:`<strong>warmup_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of steps used for a linear warmup from 0 to <code>learning_rate</code>. Overrides any effect of <code>warmup_ratio</code>.`,name:"warmup_steps"},{anchor:"transformers.TrainingArguments.log_level",description:`<strong>log_level</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on the main process. 
Possible choices are the log levels as strings: &#x2018;debug&#x2019;, &#x2018;info&#x2019;, &#x2018;warning&#x2019;, &#x2018;error&#x2019; and &#x2018;critical&#x2019;, plus a &#x2018;passive&#x2019; level which doesn&#x2019;t set anything and lets the application set the level.`,name:"log_level"},{anchor:"transformers.TrainingArguments.log_level_replica",description:`<strong>log_level_replica</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on replicas. Same choices as <code>log_level</code>&#x201D;`,name:"log_level_replica"},{anchor:"transformers.TrainingArguments.log_on_each_node",description:`<strong>log_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; In multinode distributed training, whether to log using <code>log_level</code> once per node, or only on the main node.`,name:"log_on_each_node"},{anchor:"transformers.TrainingArguments.logging_dir",description:`<strong>logging_dir</strong> (<code>str</code>, <em>optional</em>) &#x2014; <a href="https://www.tensorflow.org/tensorboard" rel="nofollow">TensorBoard</a> log directory. Will default to *output_dir/runs/<strong>CURRENT_DATETIME_HOSTNAME*</strong>.`,name:"logging_dir"},{anchor:"transformers.TrainingArguments.logging_strategy",description:`<strong>logging_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The logging strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No logging is done during training.</li> <li><code>&quot;epoch&quot;</code>: Logging is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Logging is done every <code>logging_steps</code>.</li> </ul>`,name:"logging_strategy"},{anchor:"transformers.TrainingArguments.logging_first_step",description:`<strong>logging_first_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to log and evaluate the first <code>global_step</code> or not.`,name:"logging_first_step"},{anchor:"transformers.TrainingArguments.logging_steps",description:`<strong>logging_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of update steps between two logs if <code>logging_strategy=&quot;steps&quot;</code>.`,name:"logging_steps"},{anchor:"transformers.TrainingArguments.logging_nan_inf_filter",description:`<strong>logging_nan_inf_filter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to filter <code>nan</code> and <code>inf</code> losses for logging. 
If set to <code>True</code> the loss of every step that is <code>nan</code> or <code>inf</code> is filtered and the average loss of the current logging window is taken instead.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p><code>logging_nan_inf_filter</code> only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model.</p> </div>`,name:"logging_nan_inf_filter"},{anchor:"transformers.TrainingArguments.save_strategy",description:`<strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No save is done during training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code>.</li> </ul>`,name:"save_strategy"},{anchor:"transformers.TrainingArguments.save_steps",description:`<strong>save_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of updates steps before two checkpoint saves if <code>save_strategy=&quot;steps&quot;</code>.`,name:"save_steps"},{anchor:"transformers.TrainingArguments.save_total_limit",description:`<strong>save_total_limit</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a value is passed, will limit the total amount of checkpoints. 
Deletes the older checkpoints in <code>output_dir</code>.`,name:"save_total_limit"},{anchor:"transformers.TrainingArguments.save_on_each_node",description:`<strong>save_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one.</p> <p>This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node.`,name:"save_on_each_node"},{anchor:"transformers.TrainingArguments.no_cuda",description:`<strong>no_cuda</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to not use CUDA even when it is available or not.`,name:"no_cuda"},{anchor:"transformers.TrainingArguments.seed",description:`<strong>seed</strong> (<code>int</code>, <em>optional</em>, defaults to 42) &#x2014; Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the <code>model_init</code> function to instantiate the model if it has some randomly initialized parameters.`,name:"seed"},{anchor:"transformers.TrainingArguments.data_seed",description:`<strong>data_seed</strong> (<code>int</code>, <em>optional</em>) &#x2014; Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as <code>seed</code>. This can be used to ensure reproducibility of data sampling, independent of the model seed.`,name:"data_seed"},{anchor:"transformers.TrainingArguments.bf16",description:`<strong>bf16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher NVIDIA architecture. 
This is an experimental API and it may change.`,name:"bf16"},{anchor:"transformers.TrainingArguments.fp16",description:`<strong>fp16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.`,name:"fp16"},{anchor:"transformers.TrainingArguments.fp16_opt_level",description:`<strong>fp16_opt_level</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;O1&#x2019;) &#x2014; For <code>fp16</code> training, Apex AMP optimization level selected in [&#x2018;O0&#x2019;, &#x2018;O1&#x2019;, &#x2018;O2&#x2019;, and &#x2018;O3&#x2019;]. See details on the <a href="https://nvidia.github.io/apex/amp" rel="nofollow">Apex documentation</a>.`,name:"fp16_opt_level"},{anchor:"transformers.TrainingArguments.fp16_backend",description:`<strong>fp16_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; This argument is deprecated. Use <code>half_precision_backend</code> instead.`,name:"fp16_backend"},{anchor:"transformers.TrainingArguments.half_precision_backend",description:`<strong>half_precision_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; The backend to use for mixed precision training. Must be one of <code>&quot;auto&quot;</code>, <code>&quot;amp&quot;</code> or <code>&quot;apex&quot;</code>. <code>&quot;auto&quot;</code> will use AMP or APEX depending on the PyTorch version detected, while the other choices will force the requested backend.`,name:"half_precision_backend"},{anchor:"transformers.TrainingArguments.bf16_full_eval",description:`<strong>bf16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full bfloat16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. 
This is an experimental API and it may change.`,name:"bf16_full_eval"},{anchor:"transformers.TrainingArguments.fp16_full_eval",description:`<strong>fp16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full float16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values.`,name:"fp16_full_eval"},{anchor:"transformers.TrainingArguments.tf32",description:`<strong>tf32</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to enable tf32 mode, available in Ampere and newer GPU architectures. This is an experimental API and it may change.`,name:"tf32"},{anchor:"transformers.TrainingArguments.local_rank",description:`<strong>local_rank</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Rank of the process during distributed training.`,name:"local_rank"},{anchor:"transformers.TrainingArguments.xpu_backend",description:`<strong>xpu_backend</strong> (<code>str</code>, <em>optional</em>) &#x2014; The backend to use for xpu distributed training. 
Must be one of <code>&quot;mpi&quot;</code> or <code>&quot;ccl&quot;</code>.`,name:"xpu_backend"},{anchor:"transformers.TrainingArguments.tpu_num_cores",description:`<strong>tpu_num_cores</strong> (<code>int</code>, <em>optional</em>) &#x2014; When training on TPU, the number of TPU cores (automatically passed by launcher script).`,name:"tpu_num_cores"},{anchor:"transformers.TrainingArguments.dataloader_drop_last",description:`<strong>dataloader_drop_last</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not.`,name:"dataloader_drop_last"},{anchor:"transformers.TrainingArguments.eval_steps",description:`<strong>eval_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of update steps between two evaluations if <code>evaluation_strategy=&quot;steps&quot;</code>. Will default to the same value as <code>logging_steps</code> if not set.`,name:"eval_steps"},{anchor:"transformers.TrainingArguments.dataloader_num_workers",description:`<strong>dataloader_num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process.`,name:"dataloader_num_workers"},{anchor:"transformers.TrainingArguments.past_index",description:`<strong>past_index</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Some models like <a href="../model_doc/transformerxl">TransformerXL</a> or <a href="../model_doc/xlnet">XLNet</a> can make use of the past hidden states for their predictions. 
If this argument is set to a positive int, the <code>Trainer</code> will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument <code>mems</code>.`,name:"past_index"},{anchor:"transformers.TrainingArguments.run_name",description:`<strong>run_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; A descriptor for the run. Typically used for <a href="https://www.wandb.com/" rel="nofollow">wandb</a> and <a href="https://www.mlflow.org/" rel="nofollow">mlflow</a> logging.`,name:"run_name"},{anchor:"transformers.TrainingArguments.disable_tqdm",description:`<strong>disable_tqdm</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to disable the tqdm progress bars and table of metrics produced by <code>NotebookTrainingTracker</code> in Jupyter Notebooks. Will default to <code>True</code> if the logging level is set to warn or lower (default), <code>False</code> otherwise.`,name:"disable_tqdm"},{anchor:"transformers.TrainingArguments.remove_unused_columns",description:`<strong>remove_unused_columns</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If using <code>datasets.Dataset</code> datasets, whether or not to automatically remove the columns unused by the model forward method.</p> <p>(Note that this behavior is not implemented for <code>TFTrainer</code>yet.)`,name:"remove_unused_columns"},{anchor:"transformers.TrainingArguments.label_names",description:`<strong>label_names</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; The list of keys in your dictionary of inputs that correspond to the labels.</p> <p>Will eventually default to <code>[&quot;labels&quot;]</code> except if the model used is one of the <code>XxxForQuestionAnswering</code> in which case it will default to <code>[&quot;start_positions&quot;, 
&quot;end_positions&quot;]</code>.`,name:"label_names"},{anchor:"transformers.TrainingArguments.load_best_model_at_end",description:`<strong>load_best_model_at_end</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to load the best model found during training at the end of training.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When set to <code>True</code>, the parameters <code>save_strategy</code> needs to be the same as <code>eval_strategy</code>, and in the case it is &#x201C;steps&#x201D;, <code>save_steps</code> must be a round multiple of <code>eval_steps</code>.</p> </div>`,name:"load_best_model_at_end"},{anchor:"transformers.TrainingArguments.metric_for_best_model",description:`<strong>metric_for_best_model</strong> (<code>str</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> to specify the metric to use to compare two different models. Must be the name of a metric returned by the evaluation with or without the prefix <code>&quot;eval_&quot;</code>. Will default to <code>&quot;loss&quot;</code> if unspecified and <code>load_best_model_at_end=True</code> (to use the evaluation loss).</p> <p>If you set this value, <code>greater_is_better</code> will default to <code>True</code>. Don&#x2019;t forget to set it to <code>False</code> if your metric is better when lower.`,name:"metric_for_best_model"},{anchor:"transformers.TrainingArguments.greater_is_better",description:`<strong>greater_is_better</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> and <code>metric_for_best_model</code> to specify if better models should have a greater metric or not. 
Will default to:</p> <ul> <li><code>True</code> if <code>metric_for_best_model</code> is set to a value that isn&#x2019;t <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> <li><code>False</code> if <code>metric_for_best_model</code> is not set, or set to <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> </ul>`,name:"greater_is_better"},{anchor:"transformers.TrainingArguments.ignore_data_skip",description:`<strong>ignore_data_skip</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. If set to <code>True</code>, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have.`,name:"ignore_data_skip"},{anchor:"transformers.TrainingArguments.sharded_ddp",description:`<strong>sharded_ddp</strong> (<code>bool</code>, <code>str</code> or list of <code>ShardedDDPOption</code> <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Sharded DDP training from <a href="https://github.com/facebookresearch/fairscale" rel="nofollow">FairScale</a> (in distributed training only). 
This is an experimental feature.</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;simple&quot;</code>: to use first instance of sharded DDP released by fairscale (<code>ShardedDDP</code>) similar to ZeRO-2.</li> <li><code>&quot;zero_dp_2&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-2 mode (with <code>reshard_after_forward=False</code>).</li> <li><code>&quot;zero_dp_3&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-3 mode (with <code>reshard_after_forward=True</code>).</li> <li><code>&quot;offload&quot;</code>: to add ZeRO-offload (only compatible with <code>&quot;zero_dp_2&quot;</code> and <code>&quot;zero_dp_3&quot;</code>).</li> </ul> <p>If a string is passed, it will be split on space. If a bool is passed, it will be converted to an empty list for <code>False</code> and <code>[&quot;simple&quot;]</code> for <code>True</code>.`,name:"sharded_ddp"},{anchor:"transformers.TrainingArguments.deepspeed",description:`<strong>deepspeed</strong> (<code>str</code> or <code>dict</code>, <em>optional</em>) &#x2014; Use <a href="https://github.com/microsoft/deepspeed" rel="nofollow">Deepspeed</a>. This is an experimental feature and its API may evolve in the future. The value is either the location of DeepSpeed json config file (e.g., <code>ds_config.json</code>) or an already loaded json file as a <code>dict</code>&#x201D;`,name:"deepspeed"},{anchor:"transformers.TrainingArguments.label_smoothing_factor",description:`<strong>label_smoothing_factor</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The label smoothing factor to use. 
Zero means no label smoothing, otherwise the underlying onehot-encoded labels are changed from 0s and 1s to <code>label_smoothing_factor/num_labels</code> and <code>1 - label_smoothing_factor + label_smoothing_factor/num_labels</code> respectively.`,name:"label_smoothing_factor"},{anchor:"transformers.TrainingArguments.debug",description:`<strong>debug</strong> (<code>str</code> or list of <code>DebugOption</code> <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Enable one or more debug features. This is an experimental feature.</p> <p>Possible options are:</p> <ul> <li><code>&quot;underflow_overflow&quot;</code>: detects overflow in model&#x2019;s input/outputs and reports the last frames that led to the event</li> <li><code>&quot;tpu_metrics_debug&quot;</code>: print debug metrics on TPU</li> </ul> <p>The options should be separated by whitespaces.`,name:"debug"},{anchor:"transformers.TrainingArguments.optim",description:`<strong>optim</strong> (<code>str</code> or <code>training_args.OptimizerNames</code> <em>optional</em>, defaults to <code>&quot;adamw_hf&quot;</code>) &#x2014; The optimizer to use: adamw_hf, adamw_torch, adamw_apex_fused, or adafactor.`,name:"optim"},{anchor:"transformers.TrainingArguments.adafactor",description:`<strong>adafactor</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; This argument is deprecated. Use <code>--optim adafactor</code> instead.`,name:"adafactor"},{anchor:"transformers.TrainingArguments.group_by_length",description:`<strong>group_by_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to group together samples of roughly the same length in the training dataset (to minimize padding applied and be more efficient). 
Only useful if applying dynamic padding.`,name:"group_by_length"},{anchor:"transformers.TrainingArguments.length_column_name",description:`<strong>length_column_name</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;length&quot;</code>) &#x2014; Column name for precomputed lengths. If the column exists, grouping by length will use these values rather than computing them on train startup. Ignored unless <code>group_by_length</code> is <code>True</code> and the dataset is an instance of <code>Dataset</code>.`,name:"length_column_name"},{anchor:"transformers.TrainingArguments.report_to",description:`<strong>report_to</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>, defaults to <code>&quot;all&quot;</code>) &#x2014; The list of integrations to report the results and logs to. Supported platforms are <code>&quot;azure_ml&quot;</code>, <code>&quot;comet_ml&quot;</code>, <code>&quot;mlflow&quot;</code>, <code>&quot;tensorboard&quot;</code> and <code>&quot;wandb&quot;</code>. Use <code>&quot;all&quot;</code> to report to all integrations installed, <code>&quot;none&quot;</code> for no integrations.`,name:"report_to"},{anchor:"transformers.TrainingArguments.ddp_find_unused_parameters",description:`<strong>ddp_find_unused_parameters</strong> (<code>bool</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>find_unused_parameters</code> passed to <code>DistributedDataParallel</code>. 
Will default to <code>False</code> if gradient checkpointing is used, <code>True</code> otherwise.`,name:"ddp_find_unused_parameters"},{anchor:"transformers.TrainingArguments.ddp_bucket_cap_mb",description:`<strong>ddp_bucket_cap_mb</strong> (<code>int</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>bucket_cap_mb</code> passed to <code>DistributedDataParallel</code>.`,name:"ddp_bucket_cap_mb"},{anchor:"transformers.TrainingArguments.dataloader_pin_memory",description:`<strong>dataloader_pin_memory</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether you want to pin memory in data loaders or not. Will default to <code>True</code>.`,name:"dataloader_pin_memory"},{anchor:"transformers.TrainingArguments.skip_memory_metrics",description:`<strong>skip_memory_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to skip adding of memory profiler reports to metrics. This is skipped by default because it slows down the training and evaluation speed.`,name:"skip_memory_metrics"},{anchor:"transformers.TrainingArguments.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push the model to the Hub every time the model is saved. If this is activated, <code>output_dir</code> will begin a git directory synced with the the repo (determined by <code>hub_model_id</code>) and the content will be pushed each time a save is triggered (depending on your <code>save_strategy</code>). 
Calling <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> will also trigger a push.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If <code>output_dir</code> exists, it needs to be a local clone of the repository to which the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will be pushed.</p> </div>`,name:"push_to_hub"},{anchor:"transformers.TrainingArguments.resume_from_checkpoint",description:`<strong>resume_from_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; The path to a folder with a valid checkpoint for your model. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.`,name:"resume_from_checkpoint"},{anchor:"transformers.TrainingArguments.hub_model_id",description:`<strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <em>output_dir</em>. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>. 
Will default to <code>user_name/output_dir_name</code> with <em>output_dir_name</em> being the name of <code>output_dir</code>.</p> <p>Will default to to the name of <code>output_dir</code>.`,name:"hub_model_id"},{anchor:"transformers.TrainingArguments.hub_strategy",description:`<strong>hub_strategy</strong> (<code>str</code> or <code>HubStrategy</code> <em>optional</em>, defaults to <code>&quot;every_save&quot;</code>) &#x2014; Defines the scope of what is pushed to the Hub and when. Possible values are:</p> <ul> <li><code>&quot;end&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card when the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> method is called.</li> <li><code>&quot;every_save&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the save are very frequent, a new push is only attempted if the previous one is finished. 
A last push is made with the final model at the end of training.</li> <li><code>&quot;checkpoint&quot;</code>: like <code>&quot;every_save&quot;</code> but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with <code>trainer.train(resume_from_checkpoint=&quot;last-checkpoint&quot;)</code>.</li> <li><code>&quot;all_checkpoints&quot;</code>: like <code>&quot;checkpoint&quot;</code> but all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)</li> </ul>`,name:"hub_strategy"},{anchor:"transformers.TrainingArguments.hub_token",description:`<strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.`,name:"hub_token"},{anchor:"transformers.TrainingArguments.gradient_checkpointing",description:`<strong>gradient_checkpointing</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, use gradient checkpointing to save memory at the expense of slower backward pass.`,name:"gradient_checkpointing"}]}}),An=new $({props:{name:"get_process_log_level",anchor:"transformers.TrainingArguments.get_process_log_level",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1199"}}),qn=new $({props:{name:"get_warmup_steps",anchor:"transformers.TrainingArguments.get_warmup_steps",parameters:[{name:"num_training_steps",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1285"}}),Pn=new $({props:{name:"main_process_first",anchor:"transformers.TrainingArguments.main_process_first",parameters:[{name:"local",val:" = True"},{name:"desc",val:" = 
'work'"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1230",parametersDescription:[{anchor:"transformers.TrainingArguments.main_process_first.local",description:`<strong>local</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; if <code>True</code> first means process of rank 0 of each node if <code>False</code> first means process of rank 0 of node rank 0 In multi-node environment with a shared filesystem you most likely will want to use <code>local=False</code> so that only the main process of the first node will do the processing. If however, the filesystem is not shared, then the main process of each node will need to do the processing, which is the default behavior.`,name:"local"},{anchor:"transformers.TrainingArguments.main_process_first.desc",description:`<strong>desc</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;work&quot;</code>) &#x2014; a work description to be used in debug logs`,name:"desc"}]}}),Sn=new $({props:{name:"to_dict",anchor:"transformers.TrainingArguments.to_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1294"}}),Cn=new $({props:{name:"to_json_string",anchor:"transformers.TrainingArguments.to_json_string",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1309"}}),In=new $({props:{name:"to_sanitized_dict",anchor:"transformers.TrainingArguments.to_sanitized_dict",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1315"}}),Un=new se({}),Nn=new $({props:{name:"class transformers.Seq2SeqTrainingArguments",anchor:"transformers.Seq2SeqTrainingArguments",parameters:[{name:"output_dir",val:": str"},{name:"overwrite_output_dir",val:": bool = False"},{name:"do_train",val:": bool = False"},{name:"do_eval",val:": bool = 
False"},{name:"do_predict",val:": bool = False"},{name:"evaluation_strategy",val:": IntervalStrategy = 'no'"},{name:"prediction_loss_only",val:": bool = False"},{name:"per_device_train_batch_size",val:": int = 8"},{name:"per_device_eval_batch_size",val:": int = 8"},{name:"per_gpu_train_batch_size",val:": typing.Optional[int] = None"},{name:"per_gpu_eval_batch_size",val:": typing.Optional[int] = None"},{name:"gradient_accumulation_steps",val:": int = 1"},{name:"eval_accumulation_steps",val:": typing.Optional[int] = None"},{name:"learning_rate",val:": float = 5e-05"},{name:"weight_decay",val:": float = 0.0"},{name:"adam_beta1",val:": float = 0.9"},{name:"adam_beta2",val:": float = 0.999"},{name:"adam_epsilon",val:": float = 1e-08"},{name:"max_grad_norm",val:": float = 1.0"},{name:"num_train_epochs",val:": float = 3.0"},{name:"max_steps",val:": int = -1"},{name:"lr_scheduler_type",val:": SchedulerType = 'linear'"},{name:"warmup_ratio",val:": float = 0.0"},{name:"warmup_steps",val:": int = 0"},{name:"log_level",val:": typing.Optional[str] = 'passive'"},{name:"log_level_replica",val:": typing.Optional[str] = 'passive'"},{name:"log_on_each_node",val:": bool = True"},{name:"logging_dir",val:": typing.Optional[str] = None"},{name:"logging_strategy",val:": IntervalStrategy = 'steps'"},{name:"logging_first_step",val:": bool = False"},{name:"logging_steps",val:": int = 500"},{name:"logging_nan_inf_filter",val:": str = True"},{name:"save_strategy",val:": IntervalStrategy = 'steps'"},{name:"save_steps",val:": int = 500"},{name:"save_total_limit",val:": typing.Optional[int] = None"},{name:"save_on_each_node",val:": bool = False"},{name:"no_cuda",val:": bool = False"},{name:"seed",val:": int = 42"},{name:"data_seed",val:": int = None"},{name:"bf16",val:": bool = False"},{name:"fp16",val:": bool = False"},{name:"fp16_opt_level",val:": str = 'O1'"},{name:"half_precision_backend",val:": str = 'auto'"},{name:"bf16_full_eval",val:": bool = False"},{name:"fp16_full_eval",val:": bool = 
False"},{name:"tf32",val:": bool = None"},{name:"local_rank",val:": int = -1"},{name:"xpu_backend",val:": str = None"},{name:"tpu_num_cores",val:": typing.Optional[int] = None"},{name:"tpu_metrics_debug",val:": bool = False"},{name:"debug",val:": str = ''"},{name:"dataloader_drop_last",val:": bool = False"},{name:"eval_steps",val:": int = None"},{name:"dataloader_num_workers",val:": int = 0"},{name:"past_index",val:": int = -1"},{name:"run_name",val:": typing.Optional[str] = None"},{name:"disable_tqdm",val:": typing.Optional[bool] = None"},{name:"remove_unused_columns",val:": typing.Optional[bool] = True"},{name:"label_names",val:": typing.Optional[typing.List[str]] = None"},{name:"load_best_model_at_end",val:": typing.Optional[bool] = False"},{name:"metric_for_best_model",val:": typing.Optional[str] = None"},{name:"greater_is_better",val:": typing.Optional[bool] = None"},{name:"ignore_data_skip",val:": bool = False"},{name:"sharded_ddp",val:": str = ''"},{name:"deepspeed",val:": typing.Optional[str] = None"},{name:"label_smoothing_factor",val:": float = 0.0"},{name:"optim",val:": OptimizerNames = 'adamw_hf'"},{name:"adafactor",val:": bool = False"},{name:"group_by_length",val:": bool = False"},{name:"length_column_name",val:": typing.Optional[str] = 'length'"},{name:"report_to",val:": typing.Optional[typing.List[str]] = None"},{name:"ddp_find_unused_parameters",val:": typing.Optional[bool] = None"},{name:"ddp_bucket_cap_mb",val:": typing.Optional[int] = None"},{name:"dataloader_pin_memory",val:": bool = True"},{name:"skip_memory_metrics",val:": bool = True"},{name:"use_legacy_prediction_loop",val:": bool = False"},{name:"push_to_hub",val:": bool = False"},{name:"resume_from_checkpoint",val:": typing.Optional[str] = None"},{name:"hub_model_id",val:": str = None"},{name:"hub_strategy",val:": HubStrategy = 'every_save'"},{name:"hub_token",val:": str = None"},{name:"gradient_checkpointing",val:": bool = False"},{name:"fp16_backend",val:": str = 
'auto'"},{name:"push_to_hub_model_id",val:": str = None"},{name:"push_to_hub_organization",val:": str = None"},{name:"push_to_hub_token",val:": str = None"},{name:"mp_parameters",val:": str = ''"},{name:"sortish_sampler",val:": bool = False"},{name:"predict_with_generate",val:": bool = False"},{name:"generation_max_length",val:": typing.Optional[int] = None"},{name:"generation_num_beams",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args_seq2seq.py#L28",parametersDescription:[{anchor:"transformers.Seq2SeqTrainingArguments.output_dir",description:`<strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written.`,name:"output_dir"},{anchor:"transformers.Seq2SeqTrainingArguments.overwrite_output_dir",description:`<strong>overwrite_output_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, overwrite the content of the output directory. Use this to continue training if <code>output_dir</code> points to a checkpoint directory.`,name:"overwrite_output_dir"},{anchor:"transformers.Seq2SeqTrainingArguments.do_train",description:`<strong>do_train</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run training or not. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_train"},{anchor:"transformers.Seq2SeqTrainingArguments.do_eval",description:`<strong>do_eval</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to run evaluation on the validation set or not. 
Will be set to <code>True</code> if <code>evaluation_strategy</code> is different from <code>&quot;no&quot;</code>. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_eval"},{anchor:"transformers.Seq2SeqTrainingArguments.do_predict",description:`<strong>do_predict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run predictions on the test set or not. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.`,name:"do_predict"},{anchor:"transformers.Seq2SeqTrainingArguments.evaluation_strategy",description:`<strong>evaluation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;no&quot;</code>) &#x2014; The evaluation strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No evaluation is done during training.</li> <li><code>&quot;steps&quot;</code>: Evaluation is done (and logged) every <code>eval_steps</code>.</li> <li><code>&quot;epoch&quot;</code>: Evaluation is done at the end of each epoch.</li> </ul>`,name:"evaluation_strategy"},{anchor:"transformers.Seq2SeqTrainingArguments.prediction_loss_only",description:`<strong>prediction_loss_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When performing evaluation and generating predictions, only returns the loss.`,name:"prediction_loss_only"},{anchor:"transformers.Seq2SeqTrainingArguments.per_device_train_batch_size",description:`<strong>per_device_train_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for training.`,name:"per_device_train_batch_size"},{anchor:"transformers.Seq2SeqTrainingArguments.per_device_eval_batch_size",description:`<strong>per_device_eval_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for evaluation.`,name:"per_device_eval_batch_size"},{anchor:"transformers.Seq2SeqTrainingArguments.gradient_accumulation_steps",description:`<strong>gradient_accumulation_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of updates steps to accumulate the gradients for, before performing a backward/update pass.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>When using gradient accumulation, one step is counted as one step with backward pass. 
Therefore, logging, evaluation, save will be conducted every <code>gradient_accumulation_steps * xxx_step</code> training examples.</p> </div>`,name:"gradient_accumulation_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.eval_accumulation_steps",description:`<strong>eval_accumulation_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but requires more memory).`,name:"eval_accumulation_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.learning_rate",description:`<strong>learning_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 5e-5) &#x2014; The initial learning rate for <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"learning_rate"},{anchor:"transformers.Seq2SeqTrainingArguments.weight_decay",description:`<strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"weight_decay"},{anchor:"transformers.Seq2SeqTrainingArguments.adam_beta1",description:`<strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 hyperparameter for the <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"adam_beta1"},{anchor:"transformers.Seq2SeqTrainingArguments.adam_beta2",description:`<strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 hyperparameter for the <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> 
optimizer.`,name:"adam_beta2"},{anchor:"transformers.Seq2SeqTrainingArguments.adam_epsilon",description:`<strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon hyperparameter for the <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.`,name:"adam_epsilon"},{anchor:"transformers.Seq2SeqTrainingArguments.max_grad_norm",description:`<strong>max_grad_norm</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Maximum gradient norm (for gradient clipping).`,name:"max_grad_norm"},{anchor:"transformers.Seq2SeqTrainingArguments.num_train_epochs(float,",description:`<strong>num_train_epochs(<code>float</code>,</strong> <em>optional</em>, defaults to 3.0) &#x2014; Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training).`,name:"num_train_epochs(float,"},{anchor:"transformers.Seq2SeqTrainingArguments.max_steps",description:`<strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; If set to a positive number, the total number of training steps to perform. Overrides <code>num_train_epochs</code>. In case of using a finite iterable dataset the training may stop before reaching the set number of steps when all data is exhausted`,name:"max_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.lr_scheduler_type",description:`<strong>lr_scheduler_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a>, <em>optional</em>, defaults to <code>&quot;linear&quot;</code>) &#x2014; The scheduler type to use. 
See the documentation of <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a> for all possible values.`,name:"lr_scheduler_type"},{anchor:"transformers.Seq2SeqTrainingArguments.warmup_ratio",description:`<strong>warmup_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Ratio of total training steps used for a linear warmup from 0 to <code>learning_rate</code>.`,name:"warmup_ratio"},{anchor:"transformers.Seq2SeqTrainingArguments.warmup_steps",description:`<strong>warmup_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of steps used for a linear warmup from 0 to <code>learning_rate</code>. Overrides any effect of <code>warmup_ratio</code>.`,name:"warmup_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.log_level",description:`<strong>log_level</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on the main process. Possible choices are the log levels as strings: &#x2018;debug&#x2019;, &#x2018;info&#x2019;, &#x2018;warning&#x2019;, &#x2018;error&#x2019; and &#x2018;critical&#x2019;, plus a &#x2018;passive&#x2019; level which doesn&#x2019;t set anything and lets the application set the level.`,name:"log_level"},{anchor:"transformers.Seq2SeqTrainingArguments.log_level_replica",description:`<strong>log_level_replica</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on replicas. 
Same choices as <code>log_level</code>&#x201D;`,name:"log_level_replica"},{anchor:"transformers.Seq2SeqTrainingArguments.log_on_each_node",description:`<strong>log_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; In multinode distributed training, whether to log using <code>log_level</code> once per node, or only on the main node.`,name:"log_on_each_node"},{anchor:"transformers.Seq2SeqTrainingArguments.logging_dir",description:`<strong>logging_dir</strong> (<code>str</code>, <em>optional</em>) &#x2014; <a href="https://www.tensorflow.org/tensorboard" rel="nofollow">TensorBoard</a> log directory. Will default to *output_dir/runs/<strong>CURRENT_DATETIME_HOSTNAME*</strong>.`,name:"logging_dir"},{anchor:"transformers.Seq2SeqTrainingArguments.logging_strategy",description:`<strong>logging_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The logging strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No logging is done during training.</li> <li><code>&quot;epoch&quot;</code>: Logging is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Logging is done every <code>logging_steps</code>.</li> </ul>`,name:"logging_strategy"},{anchor:"transformers.Seq2SeqTrainingArguments.logging_first_step",description:`<strong>logging_first_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to log and evaluate the first <code>global_step</code> or not.`,name:"logging_first_step"},{anchor:"transformers.Seq2SeqTrainingArguments.logging_steps",description:`<strong>logging_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of update steps between two logs if <code>logging_strategy=&quot;steps&quot;</code>.`,name:"logging_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.logging_nan_inf_filter",description:`<strong>logging_nan_inf_filter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to filter <code>nan</code> and <code>inf</code> losses for logging. 
If set to <code>True</code> the loss of every step that is <code>nan</code> or <code>inf</code> is filtered and the average loss of the current logging window is taken instead.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p><code>logging_nan_inf_filter</code> only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model.</p> </div>`,name:"logging_nan_inf_filter"},{anchor:"transformers.Seq2SeqTrainingArguments.save_strategy",description:`<strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No save is done during training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code>.</li> </ul>`,name:"save_strategy"},{anchor:"transformers.Seq2SeqTrainingArguments.save_steps",description:`<strong>save_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of updates steps before two checkpoint saves if <code>save_strategy=&quot;steps&quot;</code>.`,name:"save_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.save_total_limit",description:`<strong>save_total_limit</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a value is passed, will limit the total amount of checkpoints. 
Deletes the older checkpoints in <code>output_dir</code>.`,name:"save_total_limit"},{anchor:"transformers.Seq2SeqTrainingArguments.save_on_each_node",description:`<strong>save_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one.</p> <p>This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node.`,name:"save_on_each_node"},{anchor:"transformers.Seq2SeqTrainingArguments.no_cuda",description:`<strong>no_cuda</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to not use CUDA even when it is available or not.`,name:"no_cuda"},{anchor:"transformers.Seq2SeqTrainingArguments.seed",description:`<strong>seed</strong> (<code>int</code>, <em>optional</em>, defaults to 42) &#x2014; Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the <code>model_init</code> function to instantiate the model if it has some randomly initialized parameters.`,name:"seed"},{anchor:"transformers.Seq2SeqTrainingArguments.data_seed",description:`<strong>data_seed</strong> (<code>int</code>, <em>optional</em>) &#x2014; Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as <code>seed</code>. This can be used to ensure reproducibility of data sampling, independent of the model seed.`,name:"data_seed"},{anchor:"transformers.Seq2SeqTrainingArguments.bf16",description:`<strong>bf16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher NVIDIA architecture. 
This is an experimental API and it may change.`,name:"bf16"},{anchor:"transformers.Seq2SeqTrainingArguments.fp16",description:`<strong>fp16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.`,name:"fp16"},{anchor:"transformers.Seq2SeqTrainingArguments.fp16_opt_level",description:`<strong>fp16_opt_level</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;O1&#x2019;) &#x2014; For <code>fp16</code> training, Apex AMP optimization level selected in [&#x2018;O0&#x2019;, &#x2018;O1&#x2019;, &#x2018;O2&#x2019;, and &#x2018;O3&#x2019;]. See details on the <a href="https://nvidia.github.io/apex/amp" rel="nofollow">Apex documentation</a>.`,name:"fp16_opt_level"},{anchor:"transformers.Seq2SeqTrainingArguments.fp16_backend",description:`<strong>fp16_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; This argument is deprecated. Use <code>half_precision_backend</code> instead.`,name:"fp16_backend"},{anchor:"transformers.Seq2SeqTrainingArguments.half_precision_backend",description:`<strong>half_precision_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; The backend to use for mixed precision training. Must be one of <code>&quot;auto&quot;</code>, <code>&quot;amp&quot;</code> or <code>&quot;apex&quot;</code>. <code>&quot;auto&quot;</code> will use AMP or APEX depending on the PyTorch version detected, while the other choices will force the requested backend.`,name:"half_precision_backend"},{anchor:"transformers.Seq2SeqTrainingArguments.bf16_full_eval",description:`<strong>bf16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full bfloat16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. 
This is an experimental API and it may change.`,name:"bf16_full_eval"},{anchor:"transformers.Seq2SeqTrainingArguments.fp16_full_eval",description:`<strong>fp16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full float16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values.`,name:"fp16_full_eval"},{anchor:"transformers.Seq2SeqTrainingArguments.tf32",description:`<strong>tf32</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to enable tf32 mode, available in Ampere and newer GPU architectures. This is an experimental API and it may change.`,name:"tf32"},{anchor:"transformers.Seq2SeqTrainingArguments.local_rank",description:`<strong>local_rank</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Rank of the process during distributed training.`,name:"local_rank"},{anchor:"transformers.Seq2SeqTrainingArguments.xpu_backend",description:`<strong>xpu_backend</strong> (<code>str</code>, <em>optional</em>) &#x2014; The backend to use for xpu distributed training. 
Must be one of <code>&quot;mpi&quot;</code> or <code>&quot;ccl&quot;</code>.`,name:"xpu_backend"},{anchor:"transformers.Seq2SeqTrainingArguments.tpu_num_cores",description:`<strong>tpu_num_cores</strong> (<code>int</code>, <em>optional</em>) &#x2014; When training on TPU, the number of TPU cores (automatically passed by launcher script).`,name:"tpu_num_cores"},{anchor:"transformers.Seq2SeqTrainingArguments.dataloader_drop_last",description:`<strong>dataloader_drop_last</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not.`,name:"dataloader_drop_last"},{anchor:"transformers.Seq2SeqTrainingArguments.eval_steps",description:`<strong>eval_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of update steps between two evaluations if <code>evaluation_strategy=&quot;steps&quot;</code>. Will default to the same value as <code>logging_steps</code> if not set.`,name:"eval_steps"},{anchor:"transformers.Seq2SeqTrainingArguments.dataloader_num_workers",description:`<strong>dataloader_num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process.`,name:"dataloader_num_workers"},{anchor:"transformers.Seq2SeqTrainingArguments.past_index",description:`<strong>past_index</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Some models like <a href="../model_doc/transformerxl">TransformerXL</a> or <a href="../model_doc/xlnet">XLNet</a> can make use of the past hidden states for their predictions. 
If this argument is set to a positive int, the <code>Trainer</code> will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument <code>mems</code>.`,name:"past_index"},{anchor:"transformers.Seq2SeqTrainingArguments.run_name",description:`<strong>run_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; A descriptor for the run. Typically used for <a href="https://www.wandb.com/" rel="nofollow">wandb</a> and <a href="https://www.mlflow.org/" rel="nofollow">mlflow</a> logging.`,name:"run_name"},{anchor:"transformers.Seq2SeqTrainingArguments.disable_tqdm",description:`<strong>disable_tqdm</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to disable the tqdm progress bars and table of metrics produced by <code>NotebookTrainingTracker</code> in Jupyter Notebooks. Will default to <code>True</code> if the logging level is set to warn or lower (default), <code>False</code> otherwise.`,name:"disable_tqdm"},{anchor:"transformers.Seq2SeqTrainingArguments.remove_unused_columns",description:`<strong>remove_unused_columns</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If using <code>datasets.Dataset</code> datasets, whether or not to automatically remove the columns unused by the model forward method.</p> <p>(Note that this behavior is not implemented for <code>TFTrainer</code>yet.)`,name:"remove_unused_columns"},{anchor:"transformers.Seq2SeqTrainingArguments.label_names",description:`<strong>label_names</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; The list of keys in your dictionary of inputs that correspond to the labels.</p> <p>Will eventually default to <code>[&quot;labels&quot;]</code> except if the model used is one of the <code>XxxForQuestionAnswering</code> in which case it will default to <code>[&quot;start_positions&quot;, 
&quot;end_positions&quot;]</code>.`,name:"label_names"},{anchor:"transformers.Seq2SeqTrainingArguments.load_best_model_at_end",description:`<strong>load_best_model_at_end</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to load the best model found during training at the end of training.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When set to <code>True</code>, the parameters <code>save_strategy</code> needs to be the same as <code>eval_strategy</code>, and in the case it is &#x201C;steps&#x201D;, <code>save_steps</code> must be a round multiple of <code>eval_steps</code>.</p> </div>`,name:"load_best_model_at_end"},{anchor:"transformers.Seq2SeqTrainingArguments.metric_for_best_model",description:`<strong>metric_for_best_model</strong> (<code>str</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> to specify the metric to use to compare two different models. Must be the name of a metric returned by the evaluation with or without the prefix <code>&quot;eval_&quot;</code>. Will default to <code>&quot;loss&quot;</code> if unspecified and <code>load_best_model_at_end=True</code> (to use the evaluation loss).</p> <p>If you set this value, <code>greater_is_better</code> will default to <code>True</code>. Don&#x2019;t forget to set it to <code>False</code> if your metric is better when lower.`,name:"metric_for_best_model"},{anchor:"transformers.Seq2SeqTrainingArguments.greater_is_better",description:`<strong>greater_is_better</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> and <code>metric_for_best_model</code> to specify if better models should have a greater metric or not. 
Will default to:</p> <ul> <li><code>True</code> if <code>metric_for_best_model</code> is set to a value that isn&#x2019;t <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> <li><code>False</code> if <code>metric_for_best_model</code> is not set, or set to <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> </ul>`,name:"greater_is_better"},{anchor:"transformers.Seq2SeqTrainingArguments.ignore_data_skip",description:`<strong>ignore_data_skip</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. If set to <code>True</code>, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have.`,name:"ignore_data_skip"},{anchor:"transformers.Seq2SeqTrainingArguments.sharded_ddp",description:`<strong>sharded_ddp</strong> (<code>bool</code>, <code>str</code> or list of <code>ShardedDDPOption</code> <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Sharded DDP training from <a href="https://github.com/facebookresearch/fairscale" rel="nofollow">FairScale</a> (in distributed training only). 
This is an experimental feature.</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;simple&quot;</code>: to use first instance of sharded DDP released by fairscale (<code>ShardedDDP</code>) similar to ZeRO-2.</li> <li><code>&quot;zero_dp_2&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-2 mode (with <code>reshard_after_forward=False</code>).</li> <li><code>&quot;zero_dp_3&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-3 mode (with <code>reshard_after_forward=True</code>).</li> <li><code>&quot;offload&quot;</code>: to add ZeRO-offload (only compatible with <code>&quot;zero_dp_2&quot;</code> and <code>&quot;zero_dp_3&quot;</code>).</li> </ul> <p>If a string is passed, it will be split on space. If a bool is passed, it will be converted to an empty list for <code>False</code> and <code>[&quot;simple&quot;]</code> for <code>True</code>.`,name:"sharded_ddp"},{anchor:"transformers.Seq2SeqTrainingArguments.deepspeed",description:`<strong>deepspeed</strong> (<code>str</code> or <code>dict</code>, <em>optional</em>) &#x2014; Use <a href="https://github.com/microsoft/deepspeed" rel="nofollow">Deepspeed</a>. This is an experimental feature and its API may evolve in the future. The value is either the location of DeepSpeed json config file (e.g., <code>ds_config.json</code>) or an already loaded json file as a <code>dict</code>&#x201D;`,name:"deepspeed"},{anchor:"transformers.Seq2SeqTrainingArguments.label_smoothing_factor",description:`<strong>label_smoothing_factor</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The label smoothing factor to use. 
Zero means no label smoothing, otherwise the underlying onehot-encoded labels are changed from 0s and 1s to <code>label_smoothing_factor/num_labels</code> and <code>1 - label_smoothing_factor + label_smoothing_factor/num_labels</code> respectively.`,name:"label_smoothing_factor"},{anchor:"transformers.Seq2SeqTrainingArguments.debug",description:`<strong>debug</strong> (<code>str</code> or list of <code>DebugOption</code> <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Enable one or more debug features. This is an experimental feature.</p> <p>Possible options are:</p> <ul> <li><code>&quot;underflow_overflow&quot;</code>: detects overflow in model&#x2019;s input/outputs and reports the last frames that led to the event</li> <li><code>&quot;tpu_metrics_debug&quot;</code>: print debug metrics on TPU</li> </ul> <p>The options should be separated by whitespaces.`,name:"debug"},{anchor:"transformers.Seq2SeqTrainingArguments.optim",description:`<strong>optim</strong> (<code>str</code> or <code>training_args.OptimizerNames</code> <em>optional</em>, defaults to <code>&quot;adamw_hf&quot;</code>) &#x2014; The optimizer to use: adamw_hf, adamw_torch, adamw_apex_fused, or adafactor.`,name:"optim"},{anchor:"transformers.Seq2SeqTrainingArguments.adafactor",description:`<strong>adafactor</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; This argument is deprecated. Use <code>--optim adafactor</code> instead.`,name:"adafactor"},{anchor:"transformers.Seq2SeqTrainingArguments.group_by_length",description:`<strong>group_by_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to group together samples of roughly the same length in the training dataset (to minimize padding applied and be more efficient). 
Only useful if applying dynamic padding.`,name:"group_by_length"},{anchor:"transformers.Seq2SeqTrainingArguments.length_column_name",description:`<strong>length_column_name</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;length&quot;</code>) &#x2014; Column name for precomputed lengths. If the column exists, grouping by length will use these values rather than computing them on train startup. Ignored unless <code>group_by_length</code> is <code>True</code> and the dataset is an instance of <code>Dataset</code>.`,name:"length_column_name"},{anchor:"transformers.Seq2SeqTrainingArguments.report_to",description:`<strong>report_to</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>, defaults to <code>&quot;all&quot;</code>) &#x2014; The list of integrations to report the results and logs to. Supported platforms are <code>&quot;azure_ml&quot;</code>, <code>&quot;comet_ml&quot;</code>, <code>&quot;mlflow&quot;</code>, <code>&quot;tensorboard&quot;</code> and <code>&quot;wandb&quot;</code>. Use <code>&quot;all&quot;</code> to report to all integrations installed, <code>&quot;none&quot;</code> for no integrations.`,name:"report_to"},{anchor:"transformers.Seq2SeqTrainingArguments.ddp_find_unused_parameters",description:`<strong>ddp_find_unused_parameters</strong> (<code>bool</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>find_unused_parameters</code> passed to <code>DistributedDataParallel</code>. 
Will default to <code>False</code> if gradient checkpointing is used, <code>True</code> otherwise.`,name:"ddp_find_unused_parameters"},{anchor:"transformers.Seq2SeqTrainingArguments.ddp_bucket_cap_mb",description:`<strong>ddp_bucket_cap_mb</strong> (<code>int</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>bucket_cap_mb</code> passed to <code>DistributedDataParallel</code>.`,name:"ddp_bucket_cap_mb"},{anchor:"transformers.Seq2SeqTrainingArguments.dataloader_pin_memory",description:`<strong>dataloader_pin_memory</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether you want to pin memory in data loaders or not. Will default to <code>True</code>.`,name:"dataloader_pin_memory"},{anchor:"transformers.Seq2SeqTrainingArguments.skip_memory_metrics",description:`<strong>skip_memory_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to skip adding of memory profiler reports to metrics. This is skipped by default because it slows down the training and evaluation speed.`,name:"skip_memory_metrics"},{anchor:"transformers.Seq2SeqTrainingArguments.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push the model to the Hub every time the model is saved. If this is activated, <code>output_dir</code> will begin a git directory synced with the the repo (determined by <code>hub_model_id</code>) and the content will be pushed each time a save is triggered (depending on your <code>save_strategy</code>). 
Calling <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> will also trigger a push.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If <code>output_dir</code> exists, it needs to be a local clone of the repository to which the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will be pushed.</p> </div>`,name:"push_to_hub"},{anchor:"transformers.Seq2SeqTrainingArguments.resume_from_checkpoint",description:`<strong>resume_from_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; The path to a folder with a valid checkpoint for your model. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.`,name:"resume_from_checkpoint"},{anchor:"transformers.Seq2SeqTrainingArguments.hub_model_id",description:`<strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <em>output_dir</em>. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>. 
Will default to <code>user_name/output_dir_name</code> with <em>output_dir_name</em> being the name of <code>output_dir</code>.</p> <p>Will default to to the name of <code>output_dir</code>.`,name:"hub_model_id"},{anchor:"transformers.Seq2SeqTrainingArguments.hub_strategy",description:`<strong>hub_strategy</strong> (<code>str</code> or <code>HubStrategy</code> <em>optional</em>, defaults to <code>&quot;every_save&quot;</code>) &#x2014; Defines the scope of what is pushed to the Hub and when. Possible values are:</p> <ul> <li><code>&quot;end&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card when the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> method is called.</li> <li><code>&quot;every_save&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the save are very frequent, a new push is only attempted if the previous one is finished. 
A last push is made with the final model at the end of training.</li> <li><code>&quot;checkpoint&quot;</code>: like <code>&quot;every_save&quot;</code> but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with <code>trainer.train(resume_from_checkpoint=&quot;last-checkpoint&quot;)</code>.</li> <li><code>&quot;all_checkpoints&quot;</code>: like <code>&quot;checkpoint&quot;</code> but all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)</li> </ul>`,name:"hub_strategy"},{anchor:"transformers.Seq2SeqTrainingArguments.hub_token",description:`<strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.`,name:"hub_token"},{anchor:"transformers.Seq2SeqTrainingArguments.gradient_checkpointing",description:`<strong>gradient_checkpointing</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, use gradient checkpointing to save memory at the expense of slower backward pass.`,name:"gradient_checkpointing"}]}}),Fn=new se({}),Rn=new se({}),Wn=new P({props:{code:`[...] logger = logging.getLogger(__name__) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) # set the main code and the modules it uses to the same log-level according to the node log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) trainer = Trainer(...)`,highlighted:`[...] 
logger = logging.getLogger(__name__) <span class="hljs-comment"># Setup logging</span> logging.basicConfig( <span class="hljs-built_in">format</span>=<span class="hljs-string">&quot;%(asctime)s - %(levelname)s - %(name)s - %(message)s&quot;</span>, datefmt=<span class="hljs-string">&quot;%m/%d/%Y %H:%M:%S&quot;</span>, handlers=[logging.StreamHandler(sys.stdout)], ) <span class="hljs-comment"># set the main code and the modules it uses to the same log-level according to the node</span> log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) trainer = Trainer(...)`}}),Gn=new P({props:{code:"my_app.py ... --log_level warning --log_level_replica error",highlighted:"my_app.py ... --log_level warning --log_level_replica error"}}),jn=new P({props:{code:"my_app.py ... --log_level warning --log_level_replica error --log_on_each_node 0",highlighted:"my_app.py ... --log_level warning --log_level_replica error --log_on_each_node 0"}}),Mn=new P({props:{code:"my_app.py ... --log_level error --log_level_replica error --log_on_each_node 0",highlighted:"my_app.py ... 
--log_level error --log_level_replica error --log_on_each_node 0"}}),Vn=new se({}),Bn=new se({}),Zn=new P({props:{code:"python -m torch.distributed.launch --nproc_per_node=2 trainer-program.py ...",highlighted:"python -m torch.distributed.launch --nproc_per_node=2 trainer-program.py ..."}}),Kn=new P({props:{code:"accelerate launch --num_processes 2 trainer-program.py ...",highlighted:"accelerate launch --num_processes 2 trainer-program.py ..."}}),Qn=new P({props:{code:"deepspeed --num_gpus 2 trainer-program.py ...",highlighted:"deepspeed --num_gpus 2 trainer-program.py ..."}}),es=new P({props:{code:"CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ...",highlighted:"CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ..."}}),ts=new P({props:{code:"CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py ...",highlighted:"CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py ..."}}),rs=new P({props:{code:"CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ...",highlighted:"CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ..."}}),as=new P({props:{code:"CUDA_VISIBLE_DEVICES= python trainer-program.py ...",highlighted:"CUDA_VISIBLE_DEVICES= python trainer-program.py ..."}}),ns=new P({props:{code:`export CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ...`,highlighted:`<span class="hljs-built_in">export</span> CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ...`}}),is=new P({props:{code:"export CUDA_DEVICE_ORDER=PCI_BUS_ID",highlighted:'<span class="hljs-built_in">export</span> CUDA_DEVICE_ORDER=PCI_BUS_ID'}}),ds=new P({props:{code:"export CUDA_DEVICE_ORDER=FASTEST_FIRST",highlighted:'<span class="hljs-built_in">export</span> CUDA_DEVICE_ORDER=FASTEST_FIRST'}}),cs=new P({props:{code:"export CUDA_VISIBLE_DEVICES=1,0",highlighted:'<span class="hljs-built_in">export</span> CUDA_VISIBLE_DEVICES=1,0'}}),ps=new 
se({}),fs=new se({}),vs=new P({props:{code:`pip install fairscale pip install deepspeed`,highlighted:`pip install fairscale pip install deepspeed`}}),bs=new se({}),ys=new P({props:{code:"which nvcc",highlighted:'<span class="hljs-built_in">which</span> nvcc'}}),Es=new se({}),Ts=new P({props:{code:`/usr/local/cuda-10.2 /usr/local/cuda-11.0`,highlighted:`/usr/local/cuda-10.2 /usr/local/cuda-11.0`}}),$s=new P({props:{code:`echo $PATH echo $LD_LIBRARY_PATH`,highlighted:`<span class="hljs-built_in">echo</span> <span class="hljs-variable">$PATH</span> <span class="hljs-built_in">echo</span> <span class="hljs-variable">$LD_LIBRARY_PATH</span>`}}),ks=new P({props:{code:`export PATH=/usr/local/cuda-10.2/bin:$PATH export LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:$LD_LIBRARY_PATH`,highlighted:`<span class="hljs-built_in">export</span> PATH=/usr/local/cuda-10.2/bin:<span class="hljs-variable">$PATH</span> <span class="hljs-built_in">export</span> LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:<span class="hljs-variable">$LD_LIBRARY_PATH</span>`}}),xs=new se({}),As=new P({props:{code:`sudo ln -s /usr/bin/gcc-7 /usr/local/cuda-10.2/bin/gcc sudo ln -s /usr/bin/g++-7 /usr/local/cuda-10.2/bin/g++`,highlighted:`sudo <span class="hljs-built_in">ln</span> -s /usr/bin/gcc-7 /usr/local/cuda-10.2/bin/gcc sudo <span class="hljs-built_in">ln</span> -s /usr/bin/g++-7 /usr/local/cuda-10.2/bin/g++`}}),Ds=new se({}),Os=new P({props:{code:"pip install fairscale",highlighted:"pip install fairscale"}}),Cs=new P({props:{code:"pip install transformers[fairscale]",highlighted:"pip install transformers[fairscale]"}}),Ns=new P({props:{code:"pip install fairscale --no-build-isolation .",highlighted:"pip install fairscale --no-build-isolation ."}}),zs=new P({props:{code:`git clone https://github.com/facebookresearch/fairscale/ cd fairscale rm -r dist build python setup.py bdist_wheel pip uninstall -y fairscale pip install dist/fairscale-*.whl`,highlighted:`git <span class="hljs-built_in">clone</span> 
https://github.com/facebookresearch/fairscale/ <span class="hljs-built_in">cd</span> fairscale <span class="hljs-built_in">rm</span> -r dist build python setup.py bdist_wheel pip uninstall -y fairscale pip install dist/fairscale-*.whl`}}),Fs=new P({props:{code:`pip uninstall -y fairscale; pip install fairscale --pre \\ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly \\ --no-cache --no-build-isolation`,highlighted:`pip uninstall -y fairscale; pip install fairscale --pre \\ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly \\ --no-cache --no-build-isolation`}}),Rs=new P({props:{code:`pip install -v --disable-pip-version-check . \\ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly --pre`,highlighted:`pip install -v --disable-pip-version-check . \\ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly --pre`}}),js=new P({props:{code:`python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir \\ --do_train --max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config "ro-en" \\ --source_lang en --target_lang ro \\ --fp16 --sharded_ddp simple`,highlighted:`python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir \\ --do_train --max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \\ --source_lang en --target_lang ro \\ --fp16 --sharded_ddp simple`}}),Bs=new P({props:{code:`python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir \\ --do_train 
--max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config "ro-en" \\ --source_lang en --target_lang ro \\ --fp16 --sharded_ddp zero_dp_2`,highlighted:`python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \\ --model_name_or_path t5-small --per_device_train_batch_size 1 \\ --output_dir output_dir --overwrite_output_dir \\ --do_train --max_train_samples 500 --num_train_epochs 1 \\ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \\ --source_lang en --target_lang ro \\ --fp16 --sharded_ddp zero_dp_2`}}),{c(){T=n("meta"),L=l(),x=n("h1"),S=n("a"),fe=n("span"),h(R.$$.fragment),W=l(),V=n("span"),ge=r("Trainer"),ee=l(),G=n("p"),ie=r("The "),le=n("a"),te=r("Trainer"),de=r(" class provides an API for feature-complete training in PyTorch for most standard use cases. It\u2019s used in most of the "),Y=n("a"),Ze=r("example scripts"),_e=r("."),N=l(),C=n("p"),at=r("Before instantiating your "),oe=n("a"),nt=r("Trainer"),st=r(", create a "),ve=n("a"),sa=r("TrainingArguments"),ia=r(" to access all the points of customization during training."),Je=l(),Ae=n("p"),la=r("The API supports distributed training on multiple GPUs/TPUs, mixed precision through "),be=n("a"),da=r("NVIDIA Apex"),ca=r(" and Native AMP for PyTorch."),Z=l(),H=n("p"),Qs=r("The "),ye=n("a"),Lo=r("Trainer"),ei=r(" contains the basic training loop which supports the above features. 
To inject custom behavior you can subclass them and override the following methods:"),Kt=l(),D=n("ul"),B=n("li"),Fo=n("strong"),ti=r("get_train_dataloader"),oi=r(" \u2014 Creates the training DataLoader."),ri=l(),ai=n("li"),sc=n("strong"),Qb=r("get_eval_dataloader"),ey=r(" \u2014 Creates the evaluation DataLoader."),ty=l(),ni=n("li"),ic=n("strong"),oy=r("get_test_dataloader"),ry=r(" \u2014 Creates the test DataLoader."),ay=l(),si=n("li"),lc=n("strong"),ny=r("log"),sy=r(" \u2014 Logs information on the various objects watching training."),iy=l(),it=n("li"),dc=n("strong"),ly=r("create_optimizer_and_scheduler"),dy=r(` \u2014 Sets up the optimizer and learning rate scheduler if they were not passed at init. Note, that you can also subclass or override the `),cc=n("code"),cy=r("create_optimizer"),py=r(" and "),pc=n("code"),my=r("create_scheduler"),hy=r(` methods separately.`),uy=l(),ii=n("li"),mc=n("strong"),fy=r("create_optimizer"),gy=r(" \u2014 Sets up the optimizer if it wasn\u2019t passed at init."),_y=l(),li=n("li"),hc=n("strong"),vy=r("create_scheduler"),by=r(" \u2014 Sets up the learning rate scheduler if it wasn\u2019t passed at init."),yy=l(),di=n("li"),uc=n("strong"),wy=r("compute_loss"),Ey=r(" - Computes the loss on a batch of training inputs."),Ty=l(),ci=n("li"),fc=n("strong"),$y=r("training_step"),ky=r(" \u2014 Performs a training step."),xy=l(),pi=n("li"),gc=n("strong"),Ay=r("prediction_step"),Dy=r(" \u2014 Performs an evaluation/test step."),qy=l(),mi=n("li"),_c=n("strong"),Py=r("evaluate"),Sy=r(" \u2014 Runs an evaluation loop and returns metrics."),Oy=l(),hi=n("li"),vc=n("strong"),Cy=r("predict"),Iy=r(" \u2014 Returns predictions (with metrics if labels are available) on a test set."),Og=l(),h(Ro.$$.fragment),Cg=l(),Wo=n("p"),Uy=r("Here is an example of how to customize "),ui=n("a"),Ny=r("Trainer"),zy=r(" to use a weighted loss (useful when you have an unbalanced training set):"),Ig=l(),h(pa.$$.fragment),Ug=l(),lt=n("p"),Ly=r("Another way to customize 
the training loop behavior for the PyTorch "),fi=n("a"),Fy=r("Trainer"),Ry=r(" is to use "),gi=n("a"),Wy=r("callbacks"),Gy=r(" that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms\u2026) and take decisions (like early stopping)."),Ng=l(),Qt=n("h2"),Go=n("a"),bc=n("span"),h(ma.$$.fragment),jy=l(),yc=n("span"),My=r("Trainer"),zg=l(),b=n("div"),h(ha.$$.fragment),Vy=l(),wc=n("p"),Hy=r("Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for \u{1F917} Transformers."),By=l(),Ec=n("p"),Yy=r("Important attributes:"),Zy=l(),we=n("ul"),jo=n("li"),Tc=n("strong"),Jy=r("model"),Xy=r(" \u2014 Always points to the core model. If using a transformers model, it will be a "),_i=n("a"),Ky=r("PreTrainedModel"),Qy=r(` subclass.`),e2=l(),J=n("li"),$c=n("strong"),t2=r("model_wrapped"),o2=r(` \u2014 Always points to the most external model in case one or more other modules wrap the original model. This is the model that should be used for the forward pass. For example, under `),kc=n("code"),r2=r("DeepSpeed"),a2=r(`, the inner model is wrapped in `),xc=n("code"),n2=r("DeepSpeed"),s2=r(" and then again in "),Ac=n("code"),i2=r("torch.nn.DistributedDataParallel"),l2=r(`. 
If the inner model hasn\u2019t been wrapped, then `),Dc=n("code"),d2=r("self.model_wrapped"),c2=r(" is the same as "),qc=n("code"),p2=r("self.model"),m2=r("."),h2=l(),vi=n("li"),Pc=n("strong"),u2=r("is_model_parallel"),f2=r(` \u2014 Whether or not a model has been switched to a model parallel mode (different from data parallelism, this means some of the model layers are split on different GPUs).`),g2=l(),De=n("li"),Sc=n("strong"),_2=r("place_model_on_device"),v2=r(` \u2014 Whether or not to automatically place the model on the device - it will be set to `),Oc=n("code"),b2=r("False"),y2=r(` if model parallel or deepspeed is used, or if the default `),Cc=n("code"),w2=r("TrainingArguments.place_model_on_device"),E2=r(" is overridden to return "),Ic=n("code"),T2=r("False"),$2=r(" ."),k2=l(),qe=n("li"),Uc=n("strong"),x2=r("is_in_train"),A2=r(" \u2014 Whether or not a model is currently running "),Nc=n("code"),D2=r("train"),q2=r(" (e.g. when "),zc=n("code"),P2=r("evaluate"),S2=r(` is called while in `),Lc=n("code"),O2=r("train"),C2=r(")"),I2=l(),Mo=n("div"),h(ua.$$.fragment),U2=l(),fa=n("p"),N2=r("Add a callback to the current list of "),Fc=n("code"),z2=r("TrainerCallback"),L2=r("."),F2=l(),Vo=n("div"),h(ga.$$.fragment),R2=l(),_a=n("p"),W2=r("A helper wrapper that creates an appropriate context manager for "),Rc=n("code"),G2=r("autocast"),j2=r(` while feeding it the desired arguments, depending on the situation.`),M2=l(),dt=n("div"),h(va.$$.fragment),V2=l(),Wc=n("p"),H2=r("How the loss is computed by Trainer. By default, all models return the loss in the first element."),B2=l(),Gc=n("p"),Y2=r("Subclass and override for custom behavior."),Z2=l(),ct=n("div"),h(ba.$$.fragment),J2=l(),jc=n("p"),X2=r("Setup the optimizer."),K2=l(),ya=n("p"),Q2=r(`We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the Trainer\u2019s init through `),Mc=n("code"),ew=r("optimizers"),tw=r(", or subclass and override this method in a subclass."),ow=l(),pt=n("div"),h(wa.$$.fragment),rw=l(),Vc=n("p"),aw=r("Setup the optimizer and the learning rate scheduler."),nw=l(),Xe=n("p"),sw=r(`We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer\u2019s init through `),Hc=n("code"),iw=r("optimizers"),lw=r(", or subclass and override this method (or "),Bc=n("code"),dw=r("create_optimizer"),cw=r(` and/or `),Yc=n("code"),pw=r("create_scheduler"),mw=r(") in a subclass."),hw=l(),Ho=n("div"),h(Ea.$$.fragment),uw=l(),Zc=n("p"),fw=r(`Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or passed as an argument.`),gw=l(),Pe=n("div"),h(Ta.$$.fragment),_w=l(),Jc=n("p"),vw=r("Run evaluation and returns metrics."),bw=l(),$a=n("p"),yw=r(`The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `),Xc=n("code"),ww=r("compute_metrics"),Ew=r(" argument)."),Tw=l(),Kc=n("p"),$w=r("You can also subclass and override this method to inject custom behavior."),kw=l(),mt=n("div"),h(ka.$$.fragment),xw=l(),eo=n("p"),Aw=r("Prediction/evaluation loop, shared by "),Qc=n("code"),Dw=r("Trainer.evaluate()"),qw=r(" and "),ep=n("code"),Pw=r("Trainer.predict()"),Sw=r("."),Ow=l(),tp=n("p"),Cw=r("Works both with or without labels."),Iw=l(),Bo=n("div"),h(xa.$$.fragment),Uw=l(),Aa=n("p"),Nw=r("For models that inherit from "),bi=n("a"),zw=r("PreTrainedModel"),Lw=r(`, uses that method to compute the number of floating point operations for every backward + forward pass. 
If using another model, either implement such a method in the model or subclass and override this method.`),Fw=l(),ht=n("div"),h(Da.$$.fragment),Rw=l(),qa=n("p"),Ww=r("Returns the evaluation "),op=n("code"),Gw=r("DataLoader"),jw=r("."),Mw=l(),rp=n("p"),Vw=r("Subclass and override this method if you want to inject some custom behavior."),Hw=l(),Yo=n("div"),h(Pa.$$.fragment),Bw=l(),ap=n("p"),Yw=r("Returns the optimizer class and optimizer parameters based on the training arguments."),Zw=l(),ut=n("div"),h(Sa.$$.fragment),Jw=l(),Oa=n("p"),Xw=r("Returns the test "),np=n("code"),Kw=r("DataLoader"),Qw=r("."),e0=l(),sp=n("p"),t0=r("Subclass and override this method if you want to inject some custom behavior."),o0=l(),Se=n("div"),h(Ca.$$.fragment),r0=l(),Ia=n("p"),a0=r("Returns the training "),ip=n("code"),n0=r("DataLoader"),s0=r("."),i0=l(),to=n("p"),l0=r("Will use no sampler if "),lp=n("code"),d0=r("self.train_dataset"),c0=r(" does not implement "),dp=n("code"),p0=r("__len__"),m0=r(`, a random sampler (adapted to distributed training if necessary) otherwise.`),h0=l(),cp=n("p"),u0=r("Subclass and override this method if you want to inject some custom behavior."),f0=l(),ft=n("div"),h(Ua.$$.fragment),g0=l(),Ee=n("p"),_0=r("Launch an hyperparameter search using "),pp=n("code"),v0=r("optuna"),b0=r(" or "),mp=n("code"),y0=r("Ray Tune"),w0=r(" or "),hp=n("code"),E0=r("SigOpt"),T0=r(`. 
The optimized quantity is determined by `),up=n("code"),$0=r("compute_objective"),k0=r(`, which defaults to a function returning the evaluation loss when no metric is provided, the sum of all metrics otherwise.`),x0=l(),h(Zo.$$.fragment),A0=l(),Jo=n("div"),h(Na.$$.fragment),D0=l(),za=n("p"),q0=r("Initializes a git repo in "),fp=n("code"),P0=r("self.args.hub_model_id"),S0=r("."),O0=l(),Xo=n("div"),h(La.$$.fragment),C0=l(),gp=n("p"),I0=r(`Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process.`),U0=l(),Ko=n("div"),h(Fa.$$.fragment),N0=l(),Ra=n("p"),z0=r(`Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `),_p=n("code"),L0=r("True"),F0=r(" for one process)."),R0=l(),gt=n("div"),h(Wa.$$.fragment),W0=l(),Ga=n("p"),G0=r("Log "),vp=n("code"),j0=r("logs"),M0=r(" on the various objects watching training."),V0=l(),bp=n("p"),H0=r("Subclass and override this method to inject custom behavior."),B0=l(),k=n("div"),h(ja.$$.fragment),Y0=l(),yp=n("p"),Z0=r("Log metrics in a specially formatted way"),J0=l(),wp=n("p"),X0=r("Under distributed environment this is done only for a process with rank 0."),K0=l(),Ep=n("p"),Q0=r("Notes on memory reports:"),eE=l(),oo=n("p"),tE=r("In order to get memory usage report you need to install "),Tp=n("code"),oE=r("psutil"),rE=r(". You can do that with "),$p=n("code"),aE=r("pip install psutil"),nE=r("."),sE=l(),kp=n("p"),iE=r("Now when this method is run, you will see a report that will include: :"),lE=l(),h(Ma.$$.fragment),dE=l(),xp=n("p"),Ap=n("strong"),cE=r("Understanding the reports:"),pE=l(),Ke=n("ul"),Te=n("li"),mE=r("the first segment, e.g., "),Dp=n("code"),hE=r("train__"),uE=r(", tells you which stage the metrics are for. Reports starting with "),qp=n("code"),fE=r("init_"),gE=r(` will be added to the first stage that gets run. 
So that if only evaluation is run, the memory usage for the `),Pp=n("code"),_E=r("__init__"),vE=r(" will be reported along with the "),Sp=n("code"),bE=r("eval_"),yE=r(" metrics."),wE=l(),ro=n("li"),EE=r("the third segment, is either "),Op=n("code"),TE=r("cpu"),$E=r(" or "),Cp=n("code"),kE=r("gpu"),xE=r(`, tells you whether it\u2019s the general RAM or the gpu0 memory metric.`),AE=l(),yi=n("li"),Ip=n("code"),DE=r("*_alloc_delta"),qE=r(` - is the difference in the used/allocated memory counter between the end and the start of the stage - it can be negative if a function released more memory than it allocated.`),PE=l(),_t=n("li"),Up=n("code"),SE=r("*_peaked_delta"),OE=r(` - is any extra memory that was consumed and then freed - relative to the current allocated memory counter - it is never negative. When you look at the metrics of any stage you add up `),Np=n("code"),CE=r("alloc_delta"),IE=r(` + `),zp=n("code"),UE=r("peaked_delta"),NE=r(" and you know how much memory was needed to complete that stage."),zE=l(),Lp=n("p"),LE=r(`The reporting happens only for process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since the main process does the bulk of work, but it could be not quite so if model parallel is used and then other GPUs may use a different amount of gpu memory. This is also not the same under DataParallel where gpu0 may require much more memory than the rest since it stores the gradient and optimizer states for all participating GPUS. Perhaps in the future these reports will evolve to measure those too.`),FE=l(),Fp=n("p"),RE=r(`The CPU RAM metric measures RSS (Resident Set Size) includes both the memory which is unique to the process and the memory shared with other processes. It is important to note that it does not include swapped out memory, so the reports could be imprecise.`),WE=l(),Va=n("p"),GE=r(`The CPU peak memory is measured using a sampling thread. 
Due to python\u2019s GIL it may miss some of the peak memory if that thread didn\u2019t get a chance to run when the highest memory was used. Therefore this report can be less than reality. Using `),Rp=n("code"),jE=r("tracemalloc"),ME=r(` would have reported the exact peak memory, but it doesn\u2019t report memory allocations outside of python. So if some C++ CUDA extension allocated its own memory it won\u2019t be reported. And therefore it was dropped in favor of the memory sampling approach, which reads the current process memory usage.`),VE=l(),Qe=n("p"),HE=r("The GPU allocated and peak memory reporting is done with "),Wp=n("code"),BE=r("torch.cuda.memory_allocated()"),YE=r(` and `),Gp=n("code"),ZE=r("torch.cuda.max_memory_allocated()"),JE=r(`. This metric reports only \u201Cdeltas\u201D for pytorch-specific allocations, as `),jp=n("code"),XE=r("torch.cuda"),KE=r(` memory management system doesn\u2019t track any memory allocated outside of pytorch. For example, the very first cuda call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory.`),QE=l(),re=n("p"),e4=r("Note that this tracker doesn\u2019t account for memory allocations outside of "),wi=n("a"),t4=r("Trainer"),o4=r("\u2019s "),Mp=n("code"),r4=r("__init__"),a4=r(", "),Vp=n("code"),n4=r("train"),s4=r(`, `),Hp=n("code"),i4=r("evaluate"),l4=r(" and "),Bp=n("code"),d4=r("predict"),c4=r(" calls."),p4=l(),I=n("p"),m4=r("Because "),Yp=n("code"),h4=r("evaluation"),u4=r(" calls may happen during "),Zp=n("code"),f4=r("train"),g4=r(`, we can\u2019t handle nested invocations because `),Jp=n("code"),_4=r("torch.cuda.max_memory_allocated"),v4=r(" is a single counter, so if it gets reset by a nested eval call, "),Xp=n("code"),b4=r("train"),y4=r(`\u2019s tracker will report incorrect info. If this `),Ha=n("a"),w4=r("pytorch issue"),E4=r(` gets resolved it will be possible to change this class to be re-entrant. 
Until then we will only track the outer level of `),Kp=n("code"),T4=r("train"),$4=r(", "),Qp=n("code"),k4=r("evaluate"),x4=r(" and "),em=n("code"),A4=r("predict"),D4=r(" methods. Which means that if "),tm=n("code"),q4=r("eval"),P4=r(" is called during "),om=n("code"),S4=r("train"),O4=r(`, it\u2019s the latter that will account for its memory usage and that of the former.`),C4=l(),$e=n("p"),I4=r("This also means that if any other tool that is used along the "),Ei=n("a"),U4=r("Trainer"),N4=r(` calls `),rm=n("code"),z4=r("torch.cuda.reset_peak_memory_stats"),L4=r(", the gpu peak memory stats could be invalid. And the "),Ti=n("a"),F4=r("Trainer"),R4=r(` will disrupt the normal behavior of any such tools that rely on calling `),am=n("code"),W4=r("torch.cuda.reset_peak_memory_stats"),G4=r(" themselves."),j4=l(),nm=n("p"),M4=r("For best performance you may want to consider turning the memory profiling off for production runs."),V4=l(),Qo=n("div"),h(Ba.$$.fragment),H4=l(),sm=n("p"),B4=r("Reformat Trainer metrics values to a human-readable format"),Y4=l(),vt=n("div"),h(Ya.$$.fragment),Z4=l(),Za=n("p"),J4=r("Helper to get number of samples in a "),im=n("code"),X4=r("DataLoader"),K4=r(" by accessing its dataset."),Q4=l(),$i=n("p"),eT=r("Will raise an exception if the underlying dataset does not implement method "),lm=n("code"),tT=r("__len__"),oT=l(),bt=n("div"),h(Ja.$$.fragment),rT=l(),Xa=n("p"),aT=r("Remove a callback from the current list of "),dm=n("code"),nT=r("TrainerCallback"),sT=r(" and returns it."),iT=l(),Ka=n("p"),lT=r("If the callback is not found, returns "),cm=n("code"),dT=r("None"),cT=r(" (and no error is raised)."),pT=l(),X=n("div"),h(Qa.$$.fragment),mT=l(),pm=n("p"),hT=r("Run prediction and returns predictions and potential metrics."),uT=l(),en=n("p"),fT=r(`Depending on the dataset and your use case, your test dataset may contain labels. 
In that case, this method will also return metrics, like in `),mm=n("code"),gT=r("evaluate()"),_T=r("."),vT=l(),h(er.$$.fragment),bT=l(),tn=n("p"),yT=r("Returns: "),hm=n("em"),wT=r("NamedTuple"),ET=r(" A namedtuple with the following keys:"),TT=l(),ao=n("ul"),no=n("li"),$T=r("predictions ("),um=n("code"),kT=r("np.ndarray"),xT=r("): The predictions on "),fm=n("code"),AT=r("test_dataset"),DT=r("."),qT=l(),so=n("li"),PT=r("label_ids ("),gm=n("code"),ST=r("np.ndarray"),OT=r(", "),_m=n("em"),CT=r("optional"),IT=r("): The labels (if the dataset contained some)."),UT=l(),io=n("li"),NT=r("metrics ("),vm=n("code"),zT=r("Dict[str, float]"),LT=r(", "),bm=n("em"),FT=r("optional"),RT=r(`): The potential dictionary of metrics (if the dataset contained labels).`),WT=l(),yt=n("div"),h(on.$$.fragment),GT=l(),lo=n("p"),jT=r("Prediction/evaluation loop, shared by "),ym=n("code"),MT=r("Trainer.evaluate()"),VT=r(" and "),wm=n("code"),HT=r("Trainer.predict()"),BT=r("."),YT=l(),Em=n("p"),ZT=r("Works both with or without labels."),JT=l(),wt=n("div"),h(rn.$$.fragment),XT=l(),co=n("p"),KT=r("Perform an evaluation step on "),Tm=n("code"),QT=r("model"),e3=r(" using "),$m=n("code"),t3=r("inputs"),o3=r("."),r3=l(),km=n("p"),a3=r("Subclass and override to inject custom behavior."),n3=l(),tr=n("div"),h(an.$$.fragment),s3=l(),et=n("p"),i3=r("Upload "),xm=n("em"),l3=r("self.model"),d3=r(" and "),Am=n("em"),c3=r("self.tokenizer"),p3=r(" to the \u{1F917} model hub on the repo "),Dm=n("em"),m3=r("self.args.hub_model_id"),h3=r("."),u3=l(),or=n("div"),h(nn.$$.fragment),f3=l(),sn=n("p"),g3=r("Remove a callback from the current list of "),qm=n("code"),_3=r("TrainerCallback"),v3=r("."),b3=l(),Oe=n("div"),h(ln.$$.fragment),y3=l(),dn=n("p"),w3=r("Save metrics into a json file for that split, e.g. 
"),Pm=n("code"),E3=r("train_results.json"),T3=r("."),$3=l(),Sm=n("p"),k3=r("Under distributed environment this is done only for a process with rank 0."),x3=l(),cn=n("p"),A3=r("To understand the metrics please read the docstring of "),Om=n("code"),D3=r("log_metrics()"),q3=r(` The only difference is that raw unformatted numbers are saved in the current method.`),P3=l(),Et=n("div"),h(pn.$$.fragment),S3=l(),mn=n("p"),O3=r("Will save the model, so you can reload it using "),Cm=n("code"),C3=r("from_pretrained()"),I3=r("."),U3=l(),Im=n("p"),N3=r("Will only save from the main process."),z3=l(),Tt=n("div"),h(hn.$$.fragment),L3=l(),Um=n("p"),F3=r("Saves the Trainer state, since Trainer.save_model saves only the tokenizer with the model"),R3=l(),Nm=n("p"),W3=r("Under distributed environment this is done only for a process with rank 0."),G3=l(),rr=n("div"),h(un.$$.fragment),j3=l(),zm=n("p"),M3=r("Main training entry point."),V3=l(),$t=n("div"),h(fn.$$.fragment),H3=l(),Lm=n("p"),B3=r("Perform a training step on a batch of inputs."),Y3=l(),Fm=n("p"),Z3=r("Subclass and override to inject custom behavior."),Lg=l(),po=n("h2"),ar=n("a"),Rm=n("span"),h(gn.$$.fragment),J3=l(),Wm=n("span"),X3=r("Seq2SeqTrainer"),Fg=l(),tt=n("div"),h(_n.$$.fragment),K3=l(),Ce=n("div"),h(vn.$$.fragment),Q3=l(),Gm=n("p"),e6=r("Run evaluation and returns metrics."),t6=l(),bn=n("p"),o6=r(`The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `),jm=n("code"),r6=r("compute_metrics"),a6=r(" argument)."),n6=l(),Mm=n("p"),s6=r("You can also subclass and override this method to inject custom behavior."),i6=l(),K=n("div"),h(yn.$$.fragment),l6=l(),Vm=n("p"),d6=r("Run prediction and returns predictions and potential metrics."),c6=l(),wn=n("p"),p6=r(`Depending on the dataset and your use case, your test dataset may contain labels. 
In that case, this method will also return metrics, like in `),Hm=n("code"),m6=r("evaluate()"),h6=r("."),u6=l(),h(nr.$$.fragment),f6=l(),En=n("p"),g6=r("Returns: "),Bm=n("em"),_6=r("NamedTuple"),v6=r(" A namedtuple with the following keys:"),b6=l(),mo=n("ul"),ho=n("li"),y6=r("predictions ("),Ym=n("code"),w6=r("np.ndarray"),E6=r("): The predictions on "),Zm=n("code"),T6=r("test_dataset"),$6=r("."),k6=l(),uo=n("li"),x6=r("label_ids ("),Jm=n("code"),A6=r("np.ndarray"),D6=r(", "),Xm=n("em"),q6=r("optional"),P6=r("): The labels (if the dataset contained some)."),S6=l(),fo=n("li"),O6=r("metrics ("),Km=n("code"),C6=r("Dict[str, float]"),I6=r(", "),Qm=n("em"),U6=r("optional"),N6=r(`): The potential dictionary of metrics (if the dataset contained labels).`),Rg=l(),go=n("h2"),sr=n("a"),eh=n("span"),h(Tn.$$.fragment),z6=l(),th=n("span"),L6=r("TrainingArguments"),Wg=l(),z=n("div"),h($n.$$.fragment),F6=l(),kn=n("p"),R6=r("TrainingArguments is the subset of the arguments we use in our example scripts "),oh=n("strong"),W6=r(`which relate to the training loop itself`),G6=r("."),j6=l(),_o=n("p"),M6=r("Using "),ki=n("a"),V6=r("HfArgumentParser"),H6=r(` we can turn this class into `),xn=n("a"),B6=r("argparse"),Y6=r(` arguments that can be specified on the command line.`),Z6=l(),ce=n("div"),h(An.$$.fragment),J6=l(),rh=n("p"),X6=r(`Returns the log level to be used depending on whether this process is the main process of node 0, main process of node non-0, or a non-main process.`),K6=l(),vo=n("p"),Q6=r("For the main process the log level defaults to "),ah=n("code"),e$=r("logging.INFO"),t$=r(" unless overridden by "),nh=n("code"),o$=r("log_level"),r$=r(" argument."),a$=l(),bo=n("p"),n$=r("For the replica processes the log level defaults to "),sh=n("code"),s$=r("logging.WARNING"),i$=r(" unless overridden by "),ih=n("code"),l$=r("log_level_replica"),d$=r(` argument.`),c$=l(),Dn=n("p"),p$=r("The choice between the main and replica process settings is made according to the return value of 
"),lh=n("code"),m$=r("should_log"),h$=r("."),u$=l(),ir=n("div"),h(qn.$$.fragment),f$=l(),dh=n("p"),g$=r("Get number of steps used for a linear warmup."),_$=l(),kt=n("div"),h(Pn.$$.fragment),v$=l(),ch=n("p"),b$=r(`A context manager for torch distributed environment where on needs to do something on the main process, while blocking replicas, and when it\u2019s finished releasing the replicas.`),y$=l(),yo=n("p"),w$=r("One such use is for "),ph=n("code"),E$=r("datasets"),T$=r("\u2019s "),mh=n("code"),$$=r("map"),k$=r(` feature which to be efficient should be run once on the main process, which upon completion saves a cached version of results and which then automatically gets loaded by the replicas.`),x$=l(),lr=n("div"),h(Sn.$$.fragment),A$=l(),On=n("p"),D$=r("Serializes this instance while replace "),hh=n("code"),q$=r("Enum"),P$=r(` by their values (for JSON serialization support). It obfuscates the token values by removing their value.`),S$=l(),dr=n("div"),h(Cn.$$.fragment),O$=l(),uh=n("p"),C$=r("Serializes this instance to a JSON string."),I$=l(),cr=n("div"),h(In.$$.fragment),U$=l(),fh=n("p"),N$=r("Sanitized serialization to use with TensorBoard\u2019s hparams"),Gg=l(),wo=n("h2"),pr=n("a"),gh=n("span"),h(Un.$$.fragment),z$=l(),_h=n("span"),L$=r("Seq2SeqTrainingArguments"),jg=l(),ae=n("div"),h(Nn.$$.fragment),F$=l(),zn=n("p"),R$=r("TrainingArguments is the subset of the arguments we use in our example scripts "),vh=n("strong"),W$=r(`which relate to the training loop itself`),G$=r("."),j$=l(),Eo=n("p"),M$=r("Using "),xi=n("a"),V$=r("HfArgumentParser"),H$=r(` we can turn this class into `),Ln=n("a"),B$=r("argparse"),Y$=r(` arguments that can be specified on the command line.`),Z$=l(),ne=n("p"),J$=r("sortish_sampler ("),bh=n("code"),X$=r("bool"),K$=r(", "),yh=n("em"),Q$=r("optional"),ek=r(", defaults to "),wh=n("code"),tk=r("False"),ok=r(`): Whether to use a `),Eh=n("em"),rk=r("sortish sampler"),ak=r(" or not. 
Only possible if the underlying datasets are "),Th=n("em"),nk=r("Seq2SeqDataset"),sk=r(` for now but will become generally available in the near future.`),ik=l(),q=n("p"),lk=r(`It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness for the training set. predict_with_generate (`),$h=n("code"),dk=r("bool"),ck=r(", "),kh=n("em"),pk=r("optional"),mk=r(", defaults to "),xh=n("code"),hk=r("False"),uk=r(`): Whether to use generate to calculate generative metrics (ROUGE, BLEU). generation_max_length (`),Ah=n("code"),fk=r("int"),gk=r(", "),Dh=n("em"),_k=r("optional"),vk=r(`): The `),qh=n("code"),bk=r("max_length"),yk=r(" to use on each evaluation loop when "),Ph=n("code"),wk=r("predict_with_generate=True"),Ek=r(`. Will default to the `),Sh=n("code"),Tk=r("max_length"),$k=r(` value of the model configuration. generation_num_beams (`),Oh=n("code"),kk=r("int"),xk=r(", "),Ch=n("em"),Ak=r("optional"),Dk=r(`): The `),Ih=n("code"),qk=r("num_beams"),Pk=r(" to use on each evaluation loop when "),Uh=n("code"),Sk=r("predict_with_generate=True"),Ok=r(`. Will default to the `),Nh=n("code"),Ck=r("num_beams"),Ik=r(" value of the model configuration."),Mg=l(),To=n("h2"),mr=n("a"),zh=n("span"),h(Fn.$$.fragment),Uk=l(),Lh=n("span"),Nk=r("Checkpoints"),Vg=l(),pe=n("p"),zk=r("By default, "),Ai=n("a"),Lk=r("Trainer"),Fk=r(" will save all checkpoints in the "),Fh=n("code"),Rk=r("output_dir"),Wk=r(` you set in the `),Di=n("a"),Gk=r("TrainingArguments"),jk=r(" you are using. 
Those will go in subfolder named "),Rh=n("code"),Mk=r("checkpoint-xxx"),Vk=r(` with xxx being the step at which the training was at.`),Hg=l(),hr=n("p"),Hk=r("Resuming training from a checkpoint can be done when calling "),qi=n("a"),Bk=r("Trainer.train()"),Yk=r(" with either:"),Bg=l(),ur=n("ul"),Pi=n("li"),Wh=n("code"),Zk=r("resume_from_checkpoint=True"),Jk=r(" which will resume training from the latest checkpoint"),Xk=l(),Si=n("li"),Gh=n("code"),Kk=r("resume_from_checkpoint=checkpoint_dir"),Qk=r(` which will resume training from the specific checkpoint in the directory passed.`),Yg=l(),Ie=n("p"),e5=r("In addition, you can easily save your checkpoints on the Model Hub when using "),jh=n("code"),t5=r("push_to_hub=True"),o5=r(`. By default, all the models saved in intermediate checkpoints are saved in different commits, but not the optimizer state. You can adapt the `),Mh=n("code"),r5=r("hub-strategy"),a5=r(" value of your "),Oi=n("a"),n5=r("TrainingArguments"),s5=r(" to either:"),Zg=l(),fr=n("ul"),gr=n("li"),Vh=n("code"),i5=r('"checkpoint"'),l5=r(`: the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with `),Hh=n("code"),d5=r('trainer.train(resume_from_checkpoint="output_dir/last-checkpoint")'),c5=r("."),p5=l(),Ci=n("li"),Bh=n("code"),m5=r('"all_checkpoints"'),h5=r(`: all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)`),Jg=l(),$o=n("h2"),_r=n("a"),Yh=n("span"),h(Rn.$$.fragment),u5=l(),Zh=n("span"),f5=r("Logging"),Xg=l(),Ue=n("p"),g5=r("By default "),Ii=n("a"),_5=r("Trainer"),v5=r(" will use "),Jh=n("code"),b5=r("logging.INFO"),y5=r(" for the main process and "),Xh=n("code"),w5=r("logging.WARNING"),E5=r(" for the replicas if any."),Kg=l(),xt=n("p"),T5=r("These defaults can be overridden to use any of the 5 "),Kh=n("code"),$5=r("logging"),k5=r(" levels with "),Ui=n("a"),x5=r("TrainingArguments"),A5=r(`\u2019s 
arguments:`),Qg=l(),vr=n("ul"),Ni=n("li"),Qh=n("code"),D5=r("log_level"),q5=r(" - for the main process"),P5=l(),zi=n("li"),eu=n("code"),S5=r("log_level_replica"),O5=r(" - for the replicas"),e_=l(),Ne=n("p"),C5=r("Further, if "),Li=n("a"),I5=r("TrainingArguments"),U5=r("\u2019s "),tu=n("code"),N5=r("log_on_each_node"),z5=r(" is set to "),ou=n("code"),L5=r("False"),F5=r(` only the main node will use the log level settings for its main process, all other nodes will use the log level settings for replicas.`),t_=l(),Q=n("p"),R5=r("Note that "),Fi=n("a"),W5=r("Trainer"),G5=r(" is going to set "),ru=n("code"),j5=r("transformers"),M5=r(`\u2019s log level separately for each node in its `),au=n("code"),V5=r("Trainer.__init__()"),H5=r(` So you may want to set this sooner (see the next example) if you tap into other `),nu=n("code"),B5=r("transformers"),Y5=r(" functionality before creating the "),Ri=n("a"),Z5=r("Trainer"),J5=r(" object."),o_=l(),Wi=n("p"),X5=r("Here is an example of how this can be used in an application:"),r_=l(),h(Wn.$$.fragment),a_=l(),Gi=n("p"),K5=r(`And then if you only want to see warnings on the main node and all other nodes to not print any most likely duplicated warnings you could run it as:`),n_=l(),h(Gn.$$.fragment),s_=l(),ji=n("p"),Q5=r(`In the multi-node environment if you also don\u2019t want the logs to repeat for each node\u2019s main process, you will want to change the above to:`),i_=l(),h(jn.$$.fragment),l_=l(),Mi=n("p"),ex=r(`and then only the main process of the first node will log at the \u201Cwarning\u201D level, and all other processes on the main node and all processes on other nodes will log at the \u201Cerror\u201D level.`),d_=l(),Vi=n("p"),tx=r("If you need your application to be as quiet as possible you could do:"),c_=l(),h(Mn.$$.fragment),p_=l(),br=n("p"),ox=r("(add "),su=n("code"),rx=r("--log_on_each_node 0"),ax=r(" if on multi-node 
environment)"),m_=l(),ko=n("h2"),yr=n("a"),iu=n("span"),h(Vn.$$.fragment),nx=l(),lu=n("span"),sx=r("Randomness"),h_=l(),me=n("p"),ix=r("When resuming from a checkpoint generated by "),Hi=n("a"),lx=r("Trainer"),dx=r(` all efforts are made to restore the `),du=n("em"),cx=r("python"),px=r(", "),cu=n("em"),mx=r("numpy"),hx=r(" and "),pu=n("em"),ux=r("pytorch"),fx=r(` RNG states to the same states as they were at the moment of saving that checkpoint, which should make the \u201Cstop and resume\u201D style of training as close as possible to non-stop training.`),u_=l(),At=n("p"),gx=r(`However, due to various default non-deterministic pytorch settings this might not fully work. If you want full determinism please refer to `),Hn=n("a"),_x=r("Controlling sources of randomness"),vx=r(`. As explained in the document, that some of those settings that make things deterministic (.e.g., `),mu=n("code"),bx=r("torch.backends.cudnn.deterministic"),yx=r(`) may slow things down, therefore this can\u2019t be done by default, but you can enable those yourself if needed.`),f_=l(),xo=n("h2"),wr=n("a"),hu=n("span"),h(Bn.$$.fragment),wx=l(),uu=n("span"),Ex=r("Specific GPUs Selection"),g_=l(),Bi=n("p"),Tx=r("Let\u2019s discuss how you can tell your program which GPUs are to be used and in what order."),__=l(),Er=n("p"),$x=r("When using "),Yn=n("a"),fu=n("code"),kx=r("DistributedDataParallel"),xx=r(" to use only a subset of your GPUs, you simply specify the number of GPUs to use. 
For example, if you have 4 GPUs, but you wish to use the first 2 you can do:"),v_=l(),h(Zn.$$.fragment),b_=l(),Dt=n("p"),Ax=r("if you have either "),Jn=n("a"),gu=n("code"),Dx=r("accelerate"),qx=r(" or "),Xn=n("a"),_u=n("code"),Px=r("deepspeed"),Sx=r(" installed you can also accomplish the same by using one of:"),y_=l(),h(Kn.$$.fragment),w_=l(),h(Qn.$$.fragment),E_=l(),Tr=n("p"),Ox=r("You don\u2019t need to use the Accelerate or "),Yi=n("a"),Cx=r("the Deepspeed integration"),Ix=r(" features to use these launchers."),T_=l(),Zi=n("p"),Ux=r("Until now you were able to tell the program how many GPUs to use. Now let\u2019s discuss how to select specific GPUs and control their order."),$_=l(),Ji=n("p"),Nx=r("The following environment variables help you control which GPUs to use and their order."),k_=l(),Xi=n("p"),vu=n("strong"),bu=n("code"),zx=r("CUDA_VISIBLE_DEVICES"),x_=l(),$r=n("p"),Lx=r("If you have multiple GPUs and you\u2019d like to use only 1 or a few of those GPUs, set the environment variable "),yu=n("code"),Fx=r("CUDA_VISIBLE_DEVICES"),Rx=r(" to a list of the GPUs to be used."),A_=l(),Ki=n("p"),Wx=r("For example, let\u2019s say you have 4 GPUs: 0, 1, 2 and 3. 
To run only on the physical GPUs 0 and 2, you can do:"),D_=l(),h(es.$$.fragment),q_=l(),qt=n("p"),Gx=r("So now pytorch will see only 2 GPUs, where your physical GPUs 0 and 2 are mapped to "),wu=n("code"),jx=r("cuda:0"),Mx=r(" and "),Eu=n("code"),Vx=r("cuda:1"),Hx=r(" correspondingly."),P_=l(),Qi=n("p"),Bx=r("You can even change their order:"),S_=l(),h(ts.$$.fragment),O_=l(),Pt=n("p"),Yx=r("Here your physical GPUs 0 and 2 are mapped to "),Tu=n("code"),Zx=r("cuda:1"),Jx=r(" and "),$u=n("code"),Xx=r("cuda:0"),Kx=r(" correspondingly."),C_=l(),St=n("p"),Qx=r("The above examples were all for "),ku=n("code"),eA=r("DistributedDataParallel"),tA=r(" use pattern, but the same method works for "),os=n("a"),xu=n("code"),oA=r("DataParallel"),rA=r(" as well:"),I_=l(),h(rs.$$.fragment),U_=l(),el=n("p"),aA=r("To emulate an environment without GPUs simply set this environment variable to an empty value like so:"),N_=l(),h(as.$$.fragment),z_=l(),tl=n("p"),nA=r("As with any environment variable you can, of course, export those instead of adding these to the command line, as in:"),L_=l(),h(ns.$$.fragment),F_=l(),ol=n("p"),sA=r("but this approach can be confusing since you may forget you set up the environment variable earlier and not understand why the wrong GPUs are used. Therefore, it\u2019s a common practice to set the environment variable just for a specific run on the same command line as it\u2019s shown in most examples of this section."),R_=l(),rl=n("p"),Au=n("strong"),Du=n("code"),iA=r("CUDA_DEVICE_ORDER"),W_=l(),kr=n("p"),lA=r("There is an additional environment variable "),qu=n("code"),dA=r("CUDA_DEVICE_ORDER"),cA=r(" that controls how the physical devices are ordered. 
The two choices are:"),G_=l(),al=n("ol"),ss=n("li"),pA=r("ordered by PCIe bus IDs (matches "),Pu=n("code"),mA=r("nvidia-smi"),hA=r("\u2019s order) - this is the default."),j_=l(),h(is.$$.fragment),M_=l(),ls=n("ol"),Su=n("li"),uA=r("ordered by GPU compute capabilities"),V_=l(),h(ds.$$.fragment),H_=l(),Ot=n("p"),fA=r("Most of the time you don\u2019t need to care about this environment variable, but it\u2019s very helpful if you have a lopsided setup where you have an old and a new GPUs physically inserted in such a way so that the slow older card appears to be first. One way to fix that is to swap the cards. But if you can\u2019t swap the cards (e.g., if the cooling of the devices gets impacted) then setting "),Ou=n("code"),gA=r("CUDA_DEVICE_ORDER=FASTEST_FIRST"),_A=r(" will always put the newer faster card first. It\u2019ll be somewhat confusing though since "),Cu=n("code"),vA=r("nvidia-smi"),bA=r(" will still report them in the PCIe order."),B_=l(),nl=n("p"),yA=r("The other solution to swapping the order is to use:"),Y_=l(),h(cs.$$.fragment),Z_=l(),sl=n("p"),wA=r("In this example we are working with just 2 GPUs, but of course the same would apply to as many GPUs as your computer has."),J_=l(),xr=n("p"),EA=r("Also if you do set this environment variable it\u2019s the best to set it in your "),Iu=n("code"),TA=r("~/.bashrc"),$A=r(" file or some other startup config file and forget about it."),X_=l(),Ao=n("h2"),Ar=n("a"),Uu=n("span"),h(ps.$$.fragment),kA=l(),Nu=n("span"),xA=r("Trainer Integrations"),K_=l(),Dr=n("p"),AA=r("The "),il=n("a"),DA=r("Trainer"),qA=r(` has been extended to support libraries that may dramatically improve your training time and fit much bigger models.`),Q_=l(),ze=n("p"),PA=r("Currently it supports third party solutions, "),ms=n("a"),SA=r("DeepSpeed"),OA=r(" and "),hs=n("a"),CA=r("FairScale"),IA=r(", which implement parts of the paper "),us=n("a"),UA=r(`ZeRO: Memory Optimizations Toward Training Trillion Parameter Models, by Samyam Rajbhandari, 
Jeff Rasley, Olatunji Ruwase, Yuxiong He`),NA=r("."),ev=l(),ll=n("p"),zA=r("This provided support is new and experimental as of this writing."),tv=l(),dl=n("a"),ov=l(),Do=n("h3"),qr=n("a"),zu=n("span"),h(fs.$$.fragment),LA=l(),Lu=n("span"),FA=r("CUDA Extension Installation Notes"),rv=l(),cl=n("p"),RA=r("As of this writing, both FairScale and Deepspeed require compilation of CUDA C++ code, before they can be used."),av=l(),Ct=n("p"),WA=r("While all installation issues should be dealt with through the corresponding GitHub Issues of "),gs=n("a"),GA=r("FairScale"),jA=r(" and "),_s=n("a"),MA=r("Deepspeed"),VA=r(`, there are a few common issues that one may encounter while building any PyTorch extension that needs to build CUDA extensions.`),nv=l(),pl=n("p"),HA=r("Therefore, if you encounter a CUDA-related build issue while doing one of the following or both:"),sv=l(),h(vs.$$.fragment),iv=l(),ml=n("p"),BA=r("please, read the following notes first."),lv=l(),It=n("p"),YA=r("In these notes we give examples for what to do when "),Fu=n("code"),ZA=r("pytorch"),JA=r(" has been built with CUDA "),Ru=n("code"),XA=r("10.2"),KA=r(`. If your situation is different remember to adjust the version number to the one you are after.`),dv=l(),qo=n("h4"),Pr=n("a"),Wu=n("span"),h(bs.$$.fragment),QA=l(),Gu=n("span"),e9=r("Possible problem #1"),cv=l(),hl=n("p"),t9=r(`While, Pytorch comes with its own CUDA toolkit, to build these two projects you must have an identical version of CUDA installed system-wide.`),pv=l(),Le=n("p"),o9=r("For example, if you installed "),ju=n("code"),r9=r("pytorch"),a9=r(" with "),Mu=n("code"),n9=r("cudatoolkit==10.2"),s9=r(` in the Python environment, you also need to have CUDA `),Vu=n("code"),i9=r("10.2"),l9=r(" installed system-wide."),mv=l(),Ut=n("p"),d9=r("The exact location may vary from system to system, but "),Hu=n("code"),c9=r("/usr/local/cuda-10.2"),p9=r(` is the most common location on many Unix systems. 
When CUDA is correctly set up and added to the `),Bu=n("code"),m9=r("PATH"),h9=r(` environment variable, one can find the installation location by doing:`),hv=l(),h(ys.$$.fragment),uv=l(),Sr=n("p"),u9=r(`If you don\u2019t have CUDA installed system-wide, install it first. You will find the instructions by using your favorite search engine. For example, if you\u2019re on Ubuntu you may want to search for: `),ws=n("a"),f9=r("ubuntu cuda 10.2 install"),g9=r("."),fv=l(),Po=n("h4"),Or=n("a"),Yu=n("span"),h(Es.$$.fragment),_9=l(),Zu=n("span"),v9=r("Possible problem #2"),gv=l(),ul=n("p"),b9=r(`Another possible common problem is that you may have more than one CUDA toolkit installed system-wide. For example you may have:`),_v=l(),h(Ts.$$.fragment),vv=l(),Nt=n("p"),y9=r("Now, in this situation you need to make sure that your "),Ju=n("code"),w9=r("PATH"),E9=r(" and "),Xu=n("code"),T9=r("LD_LIBRARY_PATH"),$9=r(` environment variables contain the correct paths to the desired CUDA version. Typically, package installers will set these to contain whatever the last version was installed. If you encounter the problem, where the package build fails because it can\u2019t find the right CUDA version despite you having it installed system-wide, it means that you need to adjust the 2 aforementioned environment variables.`),bv=l(),fl=n("p"),k9=r("First, you may look at their contents:"),yv=l(),h($s.$$.fragment),wv=l(),gl=n("p"),x9=r("so you get an idea of what is inside."),Ev=l(),Cr=n("p"),A9=r("It\u2019s possible that "),Ku=n("code"),D9=r("LD_LIBRARY_PATH"),q9=r(" is empty."),Tv=l(),ot=n("p"),Qu=n("code"),P9=r("PATH"),S9=r(" lists the locations of where executables can be found and "),ef=n("code"),O9=r("LD_LIBRARY_PATH"),C9=r(` is for where shared libraries are to looked for. In both cases, earlier entries have priority over the later ones. 
`),tf=n("code"),I9=r(":"),U9=r(` is used to separate multiple entries.`),$v=l(),_l=n("p"),N9=r(`Now, to tell the build program where to find the specific CUDA toolkit, insert the desired paths to be listed first by doing:`),kv=l(),h(ks.$$.fragment),xv=l(),vl=n("p"),z9=r("Note that we aren\u2019t overwriting the existing values, but prepending instead."),Av=l(),Fe=n("p"),L9=r(`Of course, adjust the version number, the full path if need be. Check that the directories you assign actually do exist. `),of=n("code"),F9=r("lib64"),R9=r(" sub-directory is where the various CUDA "),rf=n("code"),W9=r(".so"),G9=r(" objects, like "),af=n("code"),j9=r("libcudart.so"),M9=r(` reside, it\u2019s unlikely that your system will have it named differently, but if it is adjust it to reflect your reality.`),Dv=l(),So=n("h4"),Ir=n("a"),nf=n("span"),h(xs.$$.fragment),V9=l(),sf=n("span"),H9=r("Possible problem #3"),qv=l(),zt=n("p"),B9=r("Some older CUDA versions may refuse to build with newer compilers. For example, you my have "),lf=n("code"),Y9=r("gcc-9"),Z9=r(` but it wants `),df=n("code"),J9=r("gcc-7"),X9=r("."),Pv=l(),bl=n("p"),K9=r("There are various ways to go about it."),Sv=l(),yl=n("p"),Q9=r("If you can install the latest CUDA toolkit it typically should support the newer compiler."),Ov=l(),Ur=n("p"),e8=r(`Alternatively, you could install the lower version of the compiler in addition to the one you already have, or you may already have it but it\u2019s not the default one, so the build system can\u2019t see it. 
If you have `),cf=n("code"),t8=r("gcc-7"),o8=r(` installed but the build system complains it can\u2019t find it, the following might do the trick:`),Cv=l(),h(As.$$.fragment),Iv=l(),j=n("p"),r8=r("Here, we are making a symlink to "),pf=n("code"),a8=r("gcc-7"),n8=r(" from "),mf=n("code"),s8=r("/usr/local/cuda-10.2/bin/gcc"),i8=r(` and since `),hf=n("code"),l8=r("/usr/local/cuda-10.2/bin/"),d8=r(" should be in the "),uf=n("code"),c8=r("PATH"),p8=r(` environment variable (see the previous problem\u2019s solution), it should find `),ff=n("code"),m8=r("gcc-7"),h8=r(" (and "),gf=n("code"),u8=r("g++7"),f8=r(") and then the build will succeed."),Uv=l(),wl=n("p"),g8=r("As always make sure to edit the paths in the example to match your situation."),Nv=l(),Oo=n("h3"),Nr=n("a"),_f=n("span"),h(Ds.$$.fragment),_8=l(),vf=n("span"),v8=r("FairScale"),zv=l(),Re=n("p"),b8=r("By integrating "),qs=n("a"),y8=r("FairScale"),w8=r(" the "),El=n("a"),E8=r("Trainer"),T8=r(` provides support for the following features from `),Ps=n("a"),$8=r("the ZeRO paper"),k8=r(":"),Lv=l(),We=n("ol"),bf=n("li"),x8=r("Optimizer State Sharding"),A8=l(),yf=n("li"),D8=r("Gradient Sharding"),q8=l(),wf=n("li"),P8=r("Model Parameters Sharding (new and very experimental)"),S8=l(),Ef=n("li"),O8=r("CPU offload (new and very experimental)"),Fv=l(),Tl=n("p"),C8=r("You will need at least two GPUs to use this feature."),Rv=l(),Ss=n("p"),Tf=n("strong"),I8=r("Installation"),U8=r(":"),Wv=l(),$l=n("p"),N8=r("Install the library via pypi:"),Gv=l(),h(Os.$$.fragment),jv=l(),Lt=n("p"),z8=r("or via "),$f=n("code"),L8=r("transformers"),F8=r("\u2019 "),kf=n("code"),R8=r("extras"),W8=r(":"),Mv=l(),h(Cs.$$.fragment),Vv=l(),Ft=n("p"),G8=r("(available starting from "),xf=n("code"),j8=r("transformers==4.6.0"),M8=r(") or find more details on "),Is=n("a"),V8=r("the FairScale\u2019s GitHub page"),H8=r("."),Hv=l(),zr=n("p"),B8=r("If you\u2019re still struggling with the build, first make sure to read "),kl=n("a"),Y8=r("CUDA Extension 
Installation Notes"),Z8=r("."),Bv=l(),xl=n("p"),J8=r("If it\u2019s still not resolved the build issue, here are a few more ideas."),Yv=l(),Us=n("p"),Af=n("code"),X8=r("fairscale"),K8=r(` seems to have an issue with the recently introduced by pip build isolation feature. If you have a problem with it, you may want to try one of:`),Zv=l(),h(Ns.$$.fragment),Jv=l(),Al=n("p"),Q8=r("or:"),Xv=l(),h(zs.$$.fragment),Kv=l(),Ls=n("p"),Df=n("code"),eD=r("fairscale"),tD=r(" also has issues with building against pytorch-nightly, so if you use it you may have to try one of:"),Qv=l(),h(Fs.$$.fragment),e1=l(),Dl=n("p"),oD=r("or:"),t1=l(),h(Rs.$$.fragment),o1=l(),ql=n("p"),rD=r("Of course, adjust the urls to match the cuda version you use."),r1=l(),Lr=n("p"),aD=r(`If after trying everything suggested you still encounter build issues, please, proceed with the GitHub Issue of `),Ws=n("a"),nD=r("FairScale"),sD=r("."),a1=l(),Gs=n("p"),qf=n("strong"),iD=r("Usage"),lD=r(":"),n1=l(),Rt=n("p"),dD=r("To use the first version of Sharded data-parallelism, add "),Pf=n("code"),cD=r("--sharded_ddp simple"),pD=r(` to the command line arguments, and make sure you have added the distributed launcher `),Sf=n("code"),mD=r("-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE"),hD=r(" if you haven\u2019t been using it already."),s1=l(),Fr=n("p"),uD=r("For example here is how you could use it for "),Of=n("code"),fD=r("run_translation.py"),gD=r(" with 2 GPUs:"),i1=l(),h(js.$$.fragment),l1=l(),Pl=n("p"),_D=r("Notes:"),d1=l(),Ge=n("ul"),Cf=n("li"),vD=r("This feature requires distributed training (so multiple GPUs)."),bD=l(),If=n("li"),yD=r("It is not implemented for TPUs."),wD=l(),Ms=n("li"),ED=r("It works with "),Uf=n("code"),TD=r("--fp16"),$D=r(" too, to make things even faster."),kD=l(),Vs=n("li"),xD=r("One of the main benefits of enabling "),Nf=n("code"),AD=r("--sharded_ddp simple"),DD=r(` is that it uses a lot less GPU memory, so you should be able to use significantly larger batch 
sizes using the same hardware (e.g. 3x and even bigger) which should lead to significantly shorter training time.`),c1=l(),Hs=n("ol"),rt=n("li"),qD=r("To use the second version of Sharded data-parallelism, add "),zf=n("code"),PD=r("--sharded_ddp zero_dp_2"),SD=r(" or "),Lf=n("code"),OD=r("--sharded_ddp zero_dp_3"),CD=r(" to the command line arguments, and make sure you have added the distributed launcher "),Ff=n("code"),ID=r("-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE"),UD=r(" if you haven\u2019t been using it already."),p1=l(),Rr=n("p"),ND=r("For example here is how you could use it for "),Rf=n("code"),zD=r("run_translation.py"),LD=r(" with 2 GPUs:"),m1=l(),h(Bs.$$.fragment),h1=l(),Co=n("p"),Wf=n("code"),FD=r("zero_dp_2"),RD=r(" is an optimized version of the simple wrapper, while "),Gf=n("code"),WD=r("zero_dp_3"),GD=r(` fully shards model weights, gradients and optimizer states.`),u1=l(),Wt=n("p"),jD=r("Both are compatible with adding "),jf=n("code"),MD=r("cpu_offload"),VD=r(" to enable ZeRO-offload (activate it like this: "),Mf=n("code"),HD=r('--sharded_ddp "zero_dp_2 cpu_offload"'),BD=r(")."),f1=l(),Sl=n("p"),YD=r("Notes:"),g1=l(),he=n("ul"),Vf=n("li"),ZD=r("This feature requires distributed training (so multiple GPUs)."),JD=l(),Hf=n("li"),XD=r("It is not implemented for TPUs."),KD=l(),Ys=n("li"),QD=r("It works with "),Bf=n("code"),e7=r("--fp16"),t7=r(" too, to make things even faster."),o7=l(),Io=n("li"),r7=r("The "),Yf=n("code"),a7=r("cpu_offload"),n7=r(" additional option requires "),Zf=n("code"),s7=r("--fp16"),i7=r("."),l7=l(),Jf=n("li"),d7=r(`This is an area of active development, so make sure you have a source install of fairscale to use this feature as some bugs you encounter may have been fixed there already.`),_1=l(),Ol=n("p"),c7=r("Known caveats:"),v1=l(),Wr=n("ul"),Uo=n("li"),p7=r("This feature is incompatible with "),Xf=n("code"),m7=r("--predict_with_generate"),h7=r(" in the "),Kf=n("em"),u7=r("run_translation.py"),f7=r(" 
script."),g7=l(),ke=n("li"),_7=r("Using "),Qf=n("code"),v7=r("--sharded_ddp zero_dp_3"),b7=r(` requires wrapping each layer of the model in the special container `),eg=n("code"),y7=r("FullyShardedDataParallelism"),w7=r(" of fairscale. It should be used with the option "),tg=n("code"),E7=r("auto_wrap"),T7=r(` if you are not doing this yourself: `),og=n("code"),$7=r('--sharded_ddp "zero_dp_3 auto_wrap"'),k7=r("."),b1=l(),Cl=n("p"),x7=r("Sections that were moved:"),y1=l(),y=n("p"),A7=r("[ "),Il=n("a"),D7=r("DeepSpeed"),rg=n("a"),q7=r(` | `),Ul=n("a"),P7=r("Installation"),ag=n("a"),S7=r(` | `),Nl=n("a"),O7=r("Deployment with multiple GPUs"),ng=n("a"),C7=r(` | `),zl=n("a"),I7=r("Deployment with one GPU"),sg=n("a"),U7=r(` | `),Ll=n("a"),N7=r("Deployment in Notebooks"),ig=n("a"),z7=r(` | `),Fl=n("a"),L7=r("Configuration"),lg=n("a"),F7=r(` | `),Rl=n("a"),R7=r("Passing Configuration"),dg=n("a"),W7=r(` | `),Wl=n("a"),G7=r("Shared Configuration"),cg=n("a"),j7=r(` | `),Gl=n("a"),M7=r("ZeRO"),pg=n("a"),V7=r(` | `),jl=n("a"),H7=r("ZeRO-2 Config"),mg=n("a"),B7=r(` | `),Ml=n("a"),Y7=r("ZeRO-3 Config"),hg=n("a"),Z7=r(` | `),Vl=n("a"),J7=r("NVMe Support"),ug=n("a"),X7=r(` | `),Hl=n("a"),K7=r("ZeRO-2 vs ZeRO-3 Performance"),fg=n("a"),Q7=r(` | `),Bl=n("a"),eq=r("ZeRO-2 Example"),gg=n("a"),tq=r(` | `),Yl=n("a"),oq=r("ZeRO-3 Example"),_g=n("a"),rq=r(` | `),Zl=n("a"),aq=r("Optimizer"),vg=n("a"),nq=r(` | `),Jl=n("a"),sq=r("Scheduler"),bg=n("a"),iq=r(` | `),Xl=n("a"),lq=r("fp32 Precision"),yg=n("a"),dq=r(` | `),Kl=n("a"),cq=r("Automatic Mixed Precision"),wg=n("a"),pq=r(` | `),Ql=n("a"),mq=r("Batch Size"),Eg=n("a"),hq=r(` | `),ed=n("a"),uq=r("Gradient Accumulation"),Tg=n("a"),fq=r(` | `),td=n("a"),gq=r("Gradient Clipping"),$g=n("a"),_q=r(` | `),od=n("a"),vq=r("Getting The Model Weights Out"),kg=n("a"),bq=r(` ]`),this.h()},l(t){const c=dL('[data-svelte="svelte-1phssyn"]',document.head);T=s(c,"META",{name:!0,content:!0}),c.forEach(o),L=d(t),x=s(t,"H1",{class:!0});var 
Zs=i(x);S=s(Zs,"A",{id:!0,class:!0,href:!0});var xg=i(S);fe=s(xg,"SPAN",{});var Ag=i(fe);u(R.$$.fragment,Ag),Ag.forEach(o),xg.forEach(o),W=d(Zs),V=s(Zs,"SPAN",{});var Dg=i(V);ge=a(Dg,"Trainer"),Dg.forEach(o),Zs.forEach(o),ee=d(t),G=s(t,"P",{});var rd=i(G);ie=a(rd,"The "),le=s(rd,"A",{href:!0});var jq=i(le);te=a(jq,"Trainer"),jq.forEach(o),de=a(rd," class provides an API for feature-complete training in PyTorch for most standard use cases. It\u2019s used in most of the "),Y=s(rd,"A",{href:!0});var Mq=i(Y);Ze=a(Mq,"example scripts"),Mq.forEach(o),_e=a(rd,"."),rd.forEach(o),N=d(t),C=s(t,"P",{});var ad=i(C);at=a(ad,"Before instantiating your "),oe=s(ad,"A",{href:!0});var Vq=i(oe);nt=a(Vq,"Trainer"),Vq.forEach(o),st=a(ad,", create a "),ve=s(ad,"A",{href:!0});var Hq=i(ve);sa=a(Hq,"TrainingArguments"),Hq.forEach(o),ia=a(ad," to access all the points of customization during training."),ad.forEach(o),Je=d(t),Ae=s(t,"P",{});var E1=i(Ae);la=a(E1,"The API supports distributed training on multiple GPUs/TPUs, mixed precision through "),be=s(E1,"A",{href:!0,rel:!0});var Bq=i(be);da=a(Bq,"NVIDIA Apex"),Bq.forEach(o),ca=a(E1," and Native AMP for PyTorch."),E1.forEach(o),Z=d(t),H=s(t,"P",{});var T1=i(H);Qs=a(T1,"The "),ye=s(T1,"A",{href:!0});var Yq=i(ye);Lo=a(Yq,"Trainer"),Yq.forEach(o),ei=a(T1," contains the basic training loop which supports the above features. 
To inject custom behavior you can subclass them and override the following methods:"),T1.forEach(o),Kt=d(t),D=s(t,"UL",{});var U=i(D);B=s(U,"LI",{});var yq=i(B);Fo=s(yq,"STRONG",{});var Zq=i(Fo);ti=a(Zq,"get_train_dataloader"),Zq.forEach(o),oi=a(yq," \u2014 Creates the training DataLoader."),yq.forEach(o),ri=d(U),ai=s(U,"LI",{});var wq=i(ai);sc=s(wq,"STRONG",{});var Jq=i(sc);Qb=a(Jq,"get_eval_dataloader"),Jq.forEach(o),ey=a(wq," \u2014 Creates the evaluation DataLoader."),wq.forEach(o),ty=d(U),ni=s(U,"LI",{});var Eq=i(ni);ic=s(Eq,"STRONG",{});var Xq=i(ic);oy=a(Xq,"get_test_dataloader"),Xq.forEach(o),ry=a(Eq," \u2014 Creates the test DataLoader."),Eq.forEach(o),ay=d(U),si=s(U,"LI",{});var Tq=i(si);lc=s(Tq,"STRONG",{});var Kq=i(lc);ny=a(Kq,"log"),Kq.forEach(o),sy=a(Tq," \u2014 Logs information on the various objects watching training."),Tq.forEach(o),iy=d(U),it=s(U,"LI",{});var Js=i(it);dc=s(Js,"STRONG",{});var Qq=i(dc);ly=a(Qq,"create_optimizer_and_scheduler"),Qq.forEach(o),dy=a(Js,` \u2014 Sets up the optimizer and learning rate scheduler if they were not passed at init. 
Note, that you can also subclass or override the `),cc=s(Js,"CODE",{});var eP=i(cc);cy=a(eP,"create_optimizer"),eP.forEach(o),py=a(Js," and "),pc=s(Js,"CODE",{});var tP=i(pc);my=a(tP,"create_scheduler"),tP.forEach(o),hy=a(Js,` methods separately.`),Js.forEach(o),uy=d(U),ii=s(U,"LI",{});var $q=i(ii);mc=s($q,"STRONG",{});var oP=i(mc);fy=a(oP,"create_optimizer"),oP.forEach(o),gy=a($q," \u2014 Sets up the optimizer if it wasn\u2019t passed at init."),$q.forEach(o),_y=d(U),li=s(U,"LI",{});var kq=i(li);hc=s(kq,"STRONG",{});var rP=i(hc);vy=a(rP,"create_scheduler"),rP.forEach(o),by=a(kq," \u2014 Sets up the learning rate scheduler if it wasn\u2019t passed at init."),kq.forEach(o),yy=d(U),di=s(U,"LI",{});var xq=i(di);uc=s(xq,"STRONG",{});var aP=i(uc);wy=a(aP,"compute_loss"),aP.forEach(o),Ey=a(xq," - Computes the loss on a batch of training inputs."),xq.forEach(o),Ty=d(U),ci=s(U,"LI",{});var Aq=i(ci);fc=s(Aq,"STRONG",{});var nP=i(fc);$y=a(nP,"training_step"),nP.forEach(o),ky=a(Aq," \u2014 Performs a training step."),Aq.forEach(o),xy=d(U),pi=s(U,"LI",{});var Dq=i(pi);gc=s(Dq,"STRONG",{});var sP=i(gc);Ay=a(sP,"prediction_step"),sP.forEach(o),Dy=a(Dq," \u2014 Performs an evaluation/test step."),Dq.forEach(o),qy=d(U),mi=s(U,"LI",{});var qq=i(mi);_c=s(qq,"STRONG",{});var iP=i(_c);Py=a(iP,"evaluate"),iP.forEach(o),Sy=a(qq," \u2014 Runs an evaluation loop and returns metrics."),qq.forEach(o),Oy=d(U),hi=s(U,"LI",{});var Pq=i(hi);vc=s(Pq,"STRONG",{});var lP=i(vc);Cy=a(lP,"predict"),lP.forEach(o),Iy=a(Pq," \u2014 Returns predictions (with metrics if labels are available) on a test set."),Pq.forEach(o),U.forEach(o),Og=d(t),u(Ro.$$.fragment,t),Cg=d(t),Wo=s(t,"P",{});var $1=i(Wo);Uy=a($1,"Here is an example of how to customize "),ui=s($1,"A",{href:!0});var dP=i(ui);Ny=a(dP,"Trainer"),dP.forEach(o),zy=a($1," to use a weighted loss (useful when you have an unbalanced training set):"),$1.forEach(o),Ig=d(t),u(pa.$$.fragment,t),Ug=d(t),lt=s(t,"P",{});var nd=i(lt);Ly=a(nd,"Another way to 
customize the training loop behavior for the PyTorch "),fi=s(nd,"A",{href:!0});var cP=i(fi);Fy=a(cP,"Trainer"),cP.forEach(o),Ry=a(nd," is to use "),gi=s(nd,"A",{href:!0});var pP=i(gi);Wy=a(pP,"callbacks"),pP.forEach(o),Gy=a(nd," that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms\u2026) and take decisions (like early stopping)."),nd.forEach(o),Ng=d(t),Qt=s(t,"H2",{class:!0});var k1=i(Qt);Go=s(k1,"A",{id:!0,class:!0,href:!0});var mP=i(Go);bc=s(mP,"SPAN",{});var hP=i(bc);u(ma.$$.fragment,hP),hP.forEach(o),mP.forEach(o),jy=d(k1),yc=s(k1,"SPAN",{});var uP=i(yc);My=a(uP,"Trainer"),uP.forEach(o),k1.forEach(o),zg=d(t),b=s(t,"DIV",{class:!0});var w=i(b);u(ha.$$.fragment,w),Vy=d(w),wc=s(w,"P",{});var fP=i(wc);Hy=a(fP,"Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for \u{1F917} Transformers."),fP.forEach(o),By=d(w),Ec=s(w,"P",{});var gP=i(Ec);Yy=a(gP,"Important attributes:"),gP.forEach(o),Zy=d(w),we=s(w,"UL",{});var Gt=i(we);jo=s(Gt,"LI",{});var qg=i(jo);Tc=s(qg,"STRONG",{});var _P=i(Tc);Jy=a(_P,"model"),_P.forEach(o),Xy=a(qg," \u2014 Always points to the core model. If using a transformers model, it will be a "),_i=s(qg,"A",{href:!0});var vP=i(_i);Ky=a(vP,"PreTrainedModel"),vP.forEach(o),Qy=a(qg,` subclass.`),qg.forEach(o),e2=d(Gt),J=s(Gt,"LI",{});var xe=i(J);$c=s(xe,"STRONG",{});var bP=i($c);t2=a(bP,"model_wrapped"),bP.forEach(o),o2=a(xe,` \u2014 Always points to the most external model in case one or more other modules wrap the original model. This is the model that should be used for the forward pass. For example, under `),kc=s(xe,"CODE",{});var yP=i(kc);r2=a(yP,"DeepSpeed"),yP.forEach(o),a2=a(xe,`, the inner model is wrapped in `),xc=s(xe,"CODE",{});var wP=i(xc);n2=a(wP,"DeepSpeed"),wP.forEach(o),s2=a(xe," and then again in "),Ac=s(xe,"CODE",{});var EP=i(Ac);i2=a(EP,"torch.nn.DistributedDataParallel"),EP.forEach(o),l2=a(xe,`. 
If the inner model hasn\u2019t been wrapped, then `),Dc=s(xe,"CODE",{});var TP=i(Dc);d2=a(TP,"self.model_wrapped"),TP.forEach(o),c2=a(xe," is the same as "),qc=s(xe,"CODE",{});var $P=i(qc);p2=a($P,"self.model"),$P.forEach(o),m2=a(xe,"."),xe.forEach(o),h2=d(Gt),vi=s(Gt,"LI",{});var Sq=i(vi);Pc=s(Sq,"STRONG",{});var kP=i(Pc);u2=a(kP,"is_model_parallel"),kP.forEach(o),f2=a(Sq,` \u2014 Whether or not a model has been switched to a model parallel mode (different from data parallelism, this means some of the model layers are split on different GPUs).`),Sq.forEach(o),g2=d(Gt),De=s(Gt,"LI",{});var No=i(De);Sc=s(No,"STRONG",{});var xP=i(Sc);_2=a(xP,"place_model_on_device"),xP.forEach(o),v2=a(No,` \u2014 Whether or not to automatically place the model on the device - it will be set to `),Oc=s(No,"CODE",{});var AP=i(Oc);b2=a(AP,"False"),AP.forEach(o),y2=a(No,` if model parallel or deepspeed is used, or if the default `),Cc=s(No,"CODE",{});var DP=i(Cc);w2=a(DP,"TrainingArguments.place_model_on_device"),DP.forEach(o),E2=a(No," is overridden to return "),Ic=s(No,"CODE",{});var qP=i(Ic);T2=a(qP,"False"),qP.forEach(o),$2=a(No," ."),No.forEach(o),k2=d(Gt),qe=s(Gt,"LI",{});var zo=i(qe);Uc=s(zo,"STRONG",{});var PP=i(Uc);x2=a(PP,"is_in_train"),PP.forEach(o),A2=a(zo," \u2014 Whether or not a model is currently running "),Nc=s(zo,"CODE",{});var SP=i(Nc);D2=a(SP,"train"),SP.forEach(o),q2=a(zo," (e.g. 
when "),zc=s(zo,"CODE",{});var OP=i(zc);P2=a(OP,"evaluate"),OP.forEach(o),S2=a(zo,` is called while in `),Lc=s(zo,"CODE",{});var CP=i(Lc);O2=a(CP,"train"),CP.forEach(o),C2=a(zo,")"),zo.forEach(o),Gt.forEach(o),I2=d(w),Mo=s(w,"DIV",{class:!0});var x1=i(Mo);u(ua.$$.fragment,x1),U2=d(x1),fa=s(x1,"P",{});var A1=i(fa);N2=a(A1,"Add a callback to the current list of "),Fc=s(A1,"CODE",{});var IP=i(Fc);z2=a(IP,"TrainerCallback"),IP.forEach(o),L2=a(A1,"."),A1.forEach(o),x1.forEach(o),F2=d(w),Vo=s(w,"DIV",{class:!0});var D1=i(Vo);u(ga.$$.fragment,D1),R2=d(D1),_a=s(D1,"P",{});var q1=i(_a);W2=a(q1,"A helper wrapper that creates an appropriate context manager for "),Rc=s(q1,"CODE",{});var UP=i(Rc);G2=a(UP,"autocast"),UP.forEach(o),j2=a(q1,` while feeding it the desired arguments, depending on the situation.`),q1.forEach(o),D1.forEach(o),M2=d(w),dt=s(w,"DIV",{class:!0});var sd=i(dt);u(va.$$.fragment,sd),V2=d(sd),Wc=s(sd,"P",{});var NP=i(Wc);H2=a(NP,"How the loss is computed by Trainer. By default, all models return the loss in the first element."),NP.forEach(o),B2=d(sd),Gc=s(sd,"P",{});var zP=i(Gc);Y2=a(zP,"Subclass and override for custom behavior."),zP.forEach(o),sd.forEach(o),Z2=d(w),ct=s(w,"DIV",{class:!0});var id=i(ct);u(ba.$$.fragment,id),J2=d(id),jc=s(id,"P",{});var LP=i(jc);X2=a(LP,"Setup the optimizer."),LP.forEach(o),K2=d(id),ya=s(id,"P",{});var P1=i(ya);Q2=a(P1,`We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer\u2019s init through `),Mc=s(P1,"CODE",{});var FP=i(Mc);ew=a(FP,"optimizers"),FP.forEach(o),tw=a(P1,", or subclass and override this method in a subclass."),P1.forEach(o),id.forEach(o),ow=d(w),pt=s(w,"DIV",{class:!0});var ld=i(pt);u(wa.$$.fragment,ld),rw=d(ld),Vc=s(ld,"P",{});var RP=i(Vc);aw=a(RP,"Setup the optimizer and the learning rate scheduler."),RP.forEach(o),nw=d(ld),Xe=s(ld,"P",{});var Gr=i(Xe);sw=a(Gr,`We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the Trainer\u2019s init through `),Hc=s(Gr,"CODE",{});var WP=i(Hc);iw=a(WP,"optimizers"),WP.forEach(o),lw=a(Gr,", or subclass and override this method (or "),Bc=s(Gr,"CODE",{});var GP=i(Bc);dw=a(GP,"create_optimizer"),GP.forEach(o),cw=a(Gr,` and/or `),Yc=s(Gr,"CODE",{});var jP=i(Yc);pw=a(jP,"create_scheduler"),jP.forEach(o),mw=a(Gr,") in a subclass."),Gr.forEach(o),ld.forEach(o),hw=d(w),Ho=s(w,"DIV",{class:!0});var S1=i(Ho);u(Ea.$$.fragment,S1),uw=d(S1),Zc=s(S1,"P",{});var MP=i(Zc);fw=a(MP,`Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or passed as an argument.`),MP.forEach(o),S1.forEach(o),gw=d(w),Pe=s(w,"DIV",{class:!0});var jr=i(Pe);u(Ta.$$.fragment,jr),_w=d(jr),Jc=s(jr,"P",{});var VP=i(Jc);vw=a(VP,"Run evaluation and returns metrics."),VP.forEach(o),bw=d(jr),$a=s(jr,"P",{});var O1=i($a);yw=a(O1,`The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `),Xc=s(O1,"CODE",{});var HP=i(Xc);ww=a(HP,"compute_metrics"),HP.forEach(o),Ew=a(O1," argument)."),O1.forEach(o),Tw=d(jr),Kc=s(jr,"P",{});var BP=i(Kc);$w=a(BP,"You can also subclass and override this method to inject custom behavior."),BP.forEach(o),jr.forEach(o),kw=d(w),mt=s(w,"DIV",{class:!0});var dd=i(mt);u(ka.$$.fragment,dd),xw=d(dd),eo=s(dd,"P",{});var cd=i(eo);Aw=a(cd,"Prediction/evaluation loop, shared by "),Qc=s(cd,"CODE",{});var YP=i(Qc);Dw=a(YP,"Trainer.evaluate()"),YP.forEach(o),qw=a(cd," and "),ep=s(cd,"CODE",{});var ZP=i(ep);Pw=a(ZP,"Trainer.predict()"),ZP.forEach(o),Sw=a(cd,"."),cd.forEach(o),Ow=d(dd),tp=s(dd,"P",{});var JP=i(tp);Cw=a(JP,"Works both with or without labels."),JP.forEach(o),dd.forEach(o),Iw=d(w),Bo=s(w,"DIV",{class:!0});var C1=i(Bo);u(xa.$$.fragment,C1),Uw=d(C1),Aa=s(C1,"P",{});var I1=i(Aa);Nw=a(I1,"For models that inherit from "),bi=s(I1,"A",{href:!0});var 
XP=i(bi);zw=a(XP,"PreTrainedModel"),XP.forEach(o),Lw=a(I1,`, uses that method to compute the number of floating point operations for every backward + forward pass. If using another model, either implement such a method in the model or subclass and override this method.`),I1.forEach(o),C1.forEach(o),Fw=d(w),ht=s(w,"DIV",{class:!0});var pd=i(ht);u(Da.$$.fragment,pd),Rw=d(pd),qa=s(pd,"P",{});var U1=i(qa);Ww=a(U1,"Returns the evaluation "),op=s(U1,"CODE",{});var KP=i(op);Gw=a(KP,"DataLoader"),KP.forEach(o),jw=a(U1,"."),U1.forEach(o),Mw=d(pd),rp=s(pd,"P",{});var QP=i(rp);Vw=a(QP,"Subclass and override this method if you want to inject some custom behavior."),QP.forEach(o),pd.forEach(o),Hw=d(w),Yo=s(w,"DIV",{class:!0});var N1=i(Yo);u(Pa.$$.fragment,N1),Bw=d(N1),ap=s(N1,"P",{});var eS=i(ap);Yw=a(eS,"Returns the optimizer class and optimizer parameters based on the training arguments."),eS.forEach(o),N1.forEach(o),Zw=d(w),ut=s(w,"DIV",{class:!0});var md=i(ut);u(Sa.$$.fragment,md),Jw=d(md),Oa=s(md,"P",{});var z1=i(Oa);Xw=a(z1,"Returns the test "),np=s(z1,"CODE",{});var tS=i(np);Kw=a(tS,"DataLoader"),tS.forEach(o),Qw=a(z1,"."),z1.forEach(o),e0=d(md),sp=s(md,"P",{});var oS=i(sp);t0=a(oS,"Subclass and override this method if you want to inject some custom behavior."),oS.forEach(o),md.forEach(o),o0=d(w),Se=s(w,"DIV",{class:!0});var Mr=i(Se);u(Ca.$$.fragment,Mr),r0=d(Mr),Ia=s(Mr,"P",{});var L1=i(Ia);a0=a(L1,"Returns the training "),ip=s(L1,"CODE",{});var rS=i(ip);n0=a(rS,"DataLoader"),rS.forEach(o),s0=a(L1,"."),L1.forEach(o),i0=d(Mr),to=s(Mr,"P",{});var hd=i(to);l0=a(hd,"Will use no sampler if "),lp=s(hd,"CODE",{});var aS=i(lp);d0=a(aS,"self.train_dataset"),aS.forEach(o),c0=a(hd," does not implement "),dp=s(hd,"CODE",{});var nS=i(dp);p0=a(nS,"__len__"),nS.forEach(o),m0=a(hd,`, a random sampler (adapted to distributed training if necessary) otherwise.`),hd.forEach(o),h0=d(Mr),cp=s(Mr,"P",{});var sS=i(cp);u0=a(sS,"Subclass and override this method if you want to inject some custom 
behavior."),sS.forEach(o),Mr.forEach(o),f0=d(w),ft=s(w,"DIV",{class:!0});var ud=i(ft);u(Ua.$$.fragment,ud),g0=d(ud),Ee=s(ud,"P",{});var jt=i(Ee);_0=a(jt,"Launch an hyperparameter search using "),pp=s(jt,"CODE",{});var iS=i(pp);v0=a(iS,"optuna"),iS.forEach(o),b0=a(jt," or "),mp=s(jt,"CODE",{});var lS=i(mp);y0=a(lS,"Ray Tune"),lS.forEach(o),w0=a(jt," or "),hp=s(jt,"CODE",{});var dS=i(hp);E0=a(dS,"SigOpt"),dS.forEach(o),T0=a(jt,`. The optimized quantity is determined by `),up=s(jt,"CODE",{});var cS=i(up);$0=a(cS,"compute_objective"),cS.forEach(o),k0=a(jt,`, which defaults to a function returning the evaluation loss when no metric is provided, the sum of all metrics otherwise.`),jt.forEach(o),x0=d(ud),u(Zo.$$.fragment,ud),ud.forEach(o),A0=d(w),Jo=s(w,"DIV",{class:!0});var F1=i(Jo);u(Na.$$.fragment,F1),D0=d(F1),za=s(F1,"P",{});var R1=i(za);q0=a(R1,"Initializes a git repo in "),fp=s(R1,"CODE",{});var pS=i(fp);P0=a(pS,"self.args.hub_model_id"),pS.forEach(o),S0=a(R1,"."),R1.forEach(o),F1.forEach(o),O0=d(w),Xo=s(w,"DIV",{class:!0});var W1=i(Xo);u(La.$$.fragment,W1),C0=d(W1),gp=s(W1,"P",{});var mS=i(gp);I0=a(mS,`Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process.`),mS.forEach(o),W1.forEach(o),U0=d(w),Ko=s(w,"DIV",{class:!0});var G1=i(Ko);u(Fa.$$.fragment,G1),N0=d(G1),Ra=s(G1,"P",{});var j1=i(Ra);z0=a(j1,`Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `),_p=s(j1,"CODE",{});var hS=i(_p);L0=a(hS,"True"),hS.forEach(o),F0=a(j1," for one process)."),j1.forEach(o),G1.forEach(o),R0=d(w),gt=s(w,"DIV",{class:!0});var fd=i(gt);u(Wa.$$.fragment,fd),W0=d(fd),Ga=s(fd,"P",{});var M1=i(Ga);G0=a(M1,"Log "),vp=s(M1,"CODE",{});var uS=i(vp);j0=a(uS,"logs"),uS.forEach(o),M0=a(M1," on the various objects watching training."),M1.forEach(o),V0=d(fd),bp=s(fd,"P",{});var fS=i(bp);H0=a(fS,"Subclass and override this method to 
inject custom behavior."),fS.forEach(o),fd.forEach(o),B0=d(w),k=s(w,"DIV",{class:!0});var A=i(k);u(ja.$$.fragment,A),Y0=d(A),yp=s(A,"P",{});var gS=i(yp);Z0=a(gS,"Log metrics in a specially formatted way"),gS.forEach(o),J0=d(A),wp=s(A,"P",{});var _S=i(wp);X0=a(_S,"Under distributed environment this is done only for a process with rank 0."),_S.forEach(o),K0=d(A),Ep=s(A,"P",{});var vS=i(Ep);Q0=a(vS,"Notes on memory reports:"),vS.forEach(o),eE=d(A),oo=s(A,"P",{});var gd=i(oo);tE=a(gd,"In order to get memory usage report you need to install "),Tp=s(gd,"CODE",{});var bS=i(Tp);oE=a(bS,"psutil"),bS.forEach(o),rE=a(gd,". You can do that with "),$p=s(gd,"CODE",{});var yS=i($p);aE=a(yS,"pip install psutil"),yS.forEach(o),nE=a(gd,"."),gd.forEach(o),sE=d(A),kp=s(A,"P",{});var wS=i(kp);iE=a(wS,"Now when this method is run, you will see a report that will include: :"),wS.forEach(o),lE=d(A),u(Ma.$$.fragment,A),dE=d(A),xp=s(A,"P",{});var ES=i(xp);Ap=s(ES,"STRONG",{});var TS=i(Ap);cE=a(TS,"Understanding the reports:"),TS.forEach(o),ES.forEach(o),pE=d(A),Ke=s(A,"UL",{});var Vr=i(Ke);Te=s(Vr,"LI",{});var Mt=i(Te);mE=a(Mt,"the first segment, e.g., "),Dp=s(Mt,"CODE",{});var $S=i(Dp);hE=a($S,"train__"),$S.forEach(o),uE=a(Mt,", tells you which stage the metrics are for. Reports starting with "),qp=s(Mt,"CODE",{});var kS=i(qp);fE=a(kS,"init_"),kS.forEach(o),gE=a(Mt,` will be added to the first stage that gets run. 
So that if only evaluation is run, the memory usage for the `),Pp=s(Mt,"CODE",{});var xS=i(Pp);_E=a(xS,"__init__"),xS.forEach(o),vE=a(Mt," will be reported along with the "),Sp=s(Mt,"CODE",{});var AS=i(Sp);bE=a(AS,"eval_"),AS.forEach(o),yE=a(Mt," metrics."),Mt.forEach(o),wE=d(Vr),ro=s(Vr,"LI",{});var _d=i(ro);EE=a(_d,"the third segment, is either "),Op=s(_d,"CODE",{});var DS=i(Op);TE=a(DS,"cpu"),DS.forEach(o),$E=a(_d," or "),Cp=s(_d,"CODE",{});var qS=i(Cp);kE=a(qS,"gpu"),qS.forEach(o),xE=a(_d,`, tells you whether it\u2019s the general RAM or the gpu0 memory metric.`),_d.forEach(o),AE=d(Vr),yi=s(Vr,"LI",{});var Oq=i(yi);Ip=s(Oq,"CODE",{});var PS=i(Ip);DE=a(PS,"*_alloc_delta"),PS.forEach(o),qE=a(Oq,` - is the difference in the used/allocated memory counter between the end and the start of the stage - it can be negative if a function released more memory than it allocated.`),Oq.forEach(o),PE=d(Vr),_t=s(Vr,"LI",{});var Xs=i(_t);Up=s(Xs,"CODE",{});var SS=i(Up);SE=a(SS,"*_peaked_delta"),SS.forEach(o),OE=a(Xs,` - is any extra memory that was consumed and then freed - relative to the current allocated memory counter - it is never negative. When you look at the metrics of any stage you add up `),Np=s(Xs,"CODE",{});var OS=i(Np);CE=a(OS,"alloc_delta"),OS.forEach(o),IE=a(Xs,` + `),zp=s(Xs,"CODE",{});var CS=i(zp);UE=a(CS,"peaked_delta"),CS.forEach(o),NE=a(Xs," and you know how much memory was needed to complete that stage."),Xs.forEach(o),Vr.forEach(o),zE=d(A),Lp=s(A,"P",{});var IS=i(Lp);LE=a(IS,`The reporting happens only for process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since the main process does the bulk of work, but it could be not quite so if model parallel is used and then other GPUs may use a different amount of gpu memory. This is also not the same under DataParallel where gpu0 may require much more memory than the rest since it stores the gradient and optimizer states for all participating GPUS. 
Perhaps in the future these reports will evolve to measure those too.`),IS.forEach(o),FE=d(A),Fp=s(A,"P",{});var US=i(Fp);RE=a(US,`The CPU RAM metric measures RSS (Resident Set Size) includes both the memory which is unique to the process and the memory shared with other processes. It is important to note that it does not include swapped out memory, so the reports could be imprecise.`),US.forEach(o),WE=d(A),Va=s(A,"P",{});var V1=i(Va);GE=a(V1,`The CPU peak memory is measured using a sampling thread. Due to python\u2019s GIL it may miss some of the peak memory if that thread didn\u2019t get a chance to run when the highest memory was used. Therefore this report can be less than reality. Using `),Rp=s(V1,"CODE",{});var NS=i(Rp);jE=a(NS,"tracemalloc"),NS.forEach(o),ME=a(V1,` would have reported the exact peak memory, but it doesn\u2019t report memory allocations outside of python. So if some C++ CUDA extension allocated its own memory it won\u2019t be reported. And therefore it was dropped in favor of the memory sampling approach, which reads the current process memory usage.`),V1.forEach(o),VE=d(A),Qe=s(A,"P",{});var Hr=i(Qe);HE=a(Hr,"The GPU allocated and peak memory reporting is done with "),Wp=s(Hr,"CODE",{});var zS=i(Wp);BE=a(zS,"torch.cuda.memory_allocated()"),zS.forEach(o),YE=a(Hr,` and `),Gp=s(Hr,"CODE",{});var LS=i(Gp);ZE=a(LS,"torch.cuda.max_memory_allocated()"),LS.forEach(o),JE=a(Hr,`. This metric reports only \u201Cdeltas\u201D for pytorch-specific allocations, as `),jp=s(Hr,"CODE",{});var FS=i(jp);XE=a(FS,"torch.cuda"),FS.forEach(o),KE=a(Hr,` memory management system doesn\u2019t track any memory allocated outside of pytorch. 
For example, the very first cuda call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory.`),Hr.forEach(o),QE=d(A),re=s(A,"P",{});var je=i(re);e4=a(je,"Note that this tracker doesn\u2019t account for memory allocations outside of "),wi=s(je,"A",{href:!0});var RS=i(wi);t4=a(RS,"Trainer"),RS.forEach(o),o4=a(je,"\u2019s "),Mp=s(je,"CODE",{});var WS=i(Mp);r4=a(WS,"__init__"),WS.forEach(o),a4=a(je,", "),Vp=s(je,"CODE",{});var GS=i(Vp);n4=a(GS,"train"),GS.forEach(o),s4=a(je,`, `),Hp=s(je,"CODE",{});var jS=i(Hp);i4=a(jS,"evaluate"),jS.forEach(o),l4=a(je," and "),Bp=s(je,"CODE",{});var MS=i(Bp);d4=a(MS,"predict"),MS.forEach(o),c4=a(je," calls."),je.forEach(o),p4=d(A),I=s(A,"P",{});var F=i(I);m4=a(F,"Because "),Yp=s(F,"CODE",{});var VS=i(Yp);h4=a(VS,"evaluation"),VS.forEach(o),u4=a(F," calls may happen during "),Zp=s(F,"CODE",{});var HS=i(Zp);f4=a(HS,"train"),HS.forEach(o),g4=a(F,`, we can\u2019t handle nested invocations because `),Jp=s(F,"CODE",{});var BS=i(Jp);_4=a(BS,"torch.cuda.max_memory_allocated"),BS.forEach(o),v4=a(F," is a single counter, so if it gets reset by a nested eval call, "),Xp=s(F,"CODE",{});var YS=i(Xp);b4=a(YS,"train"),YS.forEach(o),y4=a(F,`\u2019s tracker will report incorrect info. If this `),Ha=s(F,"A",{href:!0,rel:!0});var ZS=i(Ha);w4=a(ZS,"pytorch issue"),ZS.forEach(o),E4=a(F,` gets resolved it will be possible to change this class to be re-entrant. Until then we will only track the outer level of `),Kp=s(F,"CODE",{});var JS=i(Kp);T4=a(JS,"train"),JS.forEach(o),$4=a(F,", "),Qp=s(F,"CODE",{});var XS=i(Qp);k4=a(XS,"evaluate"),XS.forEach(o),x4=a(F," and "),em=s(F,"CODE",{});var KS=i(em);A4=a(KS,"predict"),KS.forEach(o),D4=a(F," methods. 
Which means that if "),tm=s(F,"CODE",{});var QS=i(tm);q4=a(QS,"eval"),QS.forEach(o),P4=a(F," is called during "),om=s(F,"CODE",{});var eO=i(om);S4=a(eO,"train"),eO.forEach(o),O4=a(F,`, it\u2019s the latter that will account for its memory usage and that of the former.`),F.forEach(o),C4=d(A),$e=s(A,"P",{});var Vt=i($e);I4=a(Vt,"This also means that if any other tool that is used along the "),Ei=s(Vt,"A",{href:!0});var tO=i(Ei);U4=a(tO,"Trainer"),tO.forEach(o),N4=a(Vt,` calls `),rm=s(Vt,"CODE",{});var oO=i(rm);z4=a(oO,"torch.cuda.reset_peak_memory_stats"),oO.forEach(o),L4=a(Vt,", the gpu peak memory stats could be invalid. And the "),Ti=s(Vt,"A",{href:!0});var rO=i(Ti);F4=a(rO,"Trainer"),rO.forEach(o),R4=a(Vt,` will disrupt the normal behavior of any such tools that rely on calling `),am=s(Vt,"CODE",{});var aO=i(am);W4=a(aO,"torch.cuda.reset_peak_memory_stats"),aO.forEach(o),G4=a(Vt," themselves."),Vt.forEach(o),j4=d(A),nm=s(A,"P",{});var nO=i(nm);M4=a(nO,"For best performance you may want to consider turning the memory profiling off for production runs."),nO.forEach(o),A.forEach(o),V4=d(w),Qo=s(w,"DIV",{class:!0});var H1=i(Qo);u(Ba.$$.fragment,H1),H4=d(H1),sm=s(H1,"P",{});var sO=i(sm);B4=a(sO,"Reformat Trainer metrics values to a human-readable format"),sO.forEach(o),H1.forEach(o),Y4=d(w),vt=s(w,"DIV",{class:!0});var vd=i(vt);u(Ya.$$.fragment,vd),Z4=d(vd),Za=s(vd,"P",{});var B1=i(Za);J4=a(B1,"Helper to get number of samples in a "),im=s(B1,"CODE",{});var iO=i(im);X4=a(iO,"DataLoader"),iO.forEach(o),K4=a(B1," by accessing its dataset."),B1.forEach(o),Q4=d(vd),$i=s(vd,"P",{});var Cq=i($i);eT=a(Cq,"Will raise an exception if the underlying dataset does not implement method "),lm=s(Cq,"CODE",{});var lO=i(lm);tT=a(lO,"__len__"),lO.forEach(o),Cq.forEach(o),vd.forEach(o),oT=d(w),bt=s(w,"DIV",{class:!0});var bd=i(bt);u(Ja.$$.fragment,bd),rT=d(bd),Xa=s(bd,"P",{});var Y1=i(Xa);aT=a(Y1,"Remove a callback from the current list of "),dm=s(Y1,"CODE",{});var 
dO=i(dm);nT=a(dO,"TrainerCallback"),dO.forEach(o),sT=a(Y1," and returns it."),Y1.forEach(o),iT=d(bd),Ka=s(bd,"P",{});var Z1=i(Ka);lT=a(Z1,"If the callback is not found, returns "),cm=s(Z1,"CODE",{});var cO=i(cm);dT=a(cO,"None"),cO.forEach(o),cT=a(Z1," (and no error is raised)."),Z1.forEach(o),bd.forEach(o),pT=d(w),X=s(w,"DIV",{class:!0});var Me=i(X);u(Qa.$$.fragment,Me),mT=d(Me),pm=s(Me,"P",{});var pO=i(pm);hT=a(pO,"Run prediction and returns predictions and potential metrics."),pO.forEach(o),uT=d(Me),en=s(Me,"P",{});var J1=i(en);fT=a(J1,`Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in `),mm=s(J1,"CODE",{});var mO=i(mm);gT=a(mO,"evaluate()"),mO.forEach(o),_T=a(J1,"."),J1.forEach(o),vT=d(Me),u(er.$$.fragment,Me),bT=d(Me),tn=s(Me,"P",{});var X1=i(tn);yT=a(X1,"Returns: "),hm=s(X1,"EM",{});var hO=i(hm);wT=a(hO,"NamedTuple"),hO.forEach(o),ET=a(X1," A namedtuple with the following keys:"),X1.forEach(o),TT=d(Me),ao=s(Me,"UL",{});var yd=i(ao);no=s(yd,"LI",{});var wd=i(no);$T=a(wd,"predictions ("),um=s(wd,"CODE",{});var uO=i(um);kT=a(uO,"np.ndarray"),uO.forEach(o),xT=a(wd,"): The predictions on "),fm=s(wd,"CODE",{});var fO=i(fm);AT=a(fO,"test_dataset"),fO.forEach(o),DT=a(wd,"."),wd.forEach(o),qT=d(yd),so=s(yd,"LI",{});var Ed=i(so);PT=a(Ed,"label_ids ("),gm=s(Ed,"CODE",{});var gO=i(gm);ST=a(gO,"np.ndarray"),gO.forEach(o),OT=a(Ed,", "),_m=s(Ed,"EM",{});var _O=i(_m);CT=a(_O,"optional"),_O.forEach(o),IT=a(Ed,"): The labels (if the dataset contained some)."),Ed.forEach(o),UT=d(yd),io=s(yd,"LI",{});var Td=i(io);NT=a(Td,"metrics ("),vm=s(Td,"CODE",{});var vO=i(vm);zT=a(vO,"Dict[str, float]"),vO.forEach(o),LT=a(Td,", "),bm=s(Td,"EM",{});var bO=i(bm);FT=a(bO,"optional"),bO.forEach(o),RT=a(Td,`): The potential dictionary of metrics (if the dataset contained labels).`),Td.forEach(o),yd.forEach(o),Me.forEach(o),WT=d(w),yt=s(w,"DIV",{class:!0});var 
$d=i(yt);u(on.$$.fragment,$d),GT=d($d),lo=s($d,"P",{});var kd=i(lo);jT=a(kd,"Prediction/evaluation loop, shared by "),ym=s(kd,"CODE",{});var yO=i(ym);MT=a(yO,"Trainer.evaluate()"),yO.forEach(o),VT=a(kd," and "),wm=s(kd,"CODE",{});var wO=i(wm);HT=a(wO,"Trainer.predict()"),wO.forEach(o),BT=a(kd,"."),kd.forEach(o),YT=d($d),Em=s($d,"P",{});var EO=i(Em);ZT=a(EO,"Works both with or without labels."),EO.forEach(o),$d.forEach(o),JT=d(w),wt=s(w,"DIV",{class:!0});var xd=i(wt);u(rn.$$.fragment,xd),XT=d(xd),co=s(xd,"P",{});var Ad=i(co);KT=a(Ad,"Perform an evaluation step on "),Tm=s(Ad,"CODE",{});var TO=i(Tm);QT=a(TO,"model"),TO.forEach(o),e3=a(Ad," using "),$m=s(Ad,"CODE",{});var $O=i($m);t3=a($O,"inputs"),$O.forEach(o),o3=a(Ad,"."),Ad.forEach(o),r3=d(xd),km=s(xd,"P",{});var kO=i(km);a3=a(kO,"Subclass and override to inject custom behavior."),kO.forEach(o),xd.forEach(o),n3=d(w),tr=s(w,"DIV",{class:!0});var K1=i(tr);u(an.$$.fragment,K1),s3=d(K1),et=s(K1,"P",{});var Br=i(et);i3=a(Br,"Upload "),xm=s(Br,"EM",{});var xO=i(xm);l3=a(xO,"self.model"),xO.forEach(o),d3=a(Br," and "),Am=s(Br,"EM",{});var AO=i(Am);c3=a(AO,"self.tokenizer"),AO.forEach(o),p3=a(Br," to the \u{1F917} model hub on the repo "),Dm=s(Br,"EM",{});var DO=i(Dm);m3=a(DO,"self.args.hub_model_id"),DO.forEach(o),h3=a(Br,"."),Br.forEach(o),K1.forEach(o),u3=d(w),or=s(w,"DIV",{class:!0});var Q1=i(or);u(nn.$$.fragment,Q1),f3=d(Q1),sn=s(Q1,"P",{});var eb=i(sn);g3=a(eb,"Remove a callback from the current list of "),qm=s(eb,"CODE",{});var qO=i(qm);_3=a(qO,"TrainerCallback"),qO.forEach(o),v3=a(eb,"."),eb.forEach(o),Q1.forEach(o),b3=d(w),Oe=s(w,"DIV",{class:!0});var Yr=i(Oe);u(ln.$$.fragment,Yr),y3=d(Yr),dn=s(Yr,"P",{});var tb=i(dn);w3=a(tb,"Save metrics into a json file for that split, e.g. 
"),Pm=s(tb,"CODE",{});var PO=i(Pm);E3=a(PO,"train_results.json"),PO.forEach(o),T3=a(tb,"."),tb.forEach(o),$3=d(Yr),Sm=s(Yr,"P",{});var SO=i(Sm);k3=a(SO,"Under distributed environment this is done only for a process with rank 0."),SO.forEach(o),x3=d(Yr),cn=s(Yr,"P",{});var ob=i(cn);A3=a(ob,"To understand the metrics please read the docstring of "),Om=s(ob,"CODE",{});var OO=i(Om);D3=a(OO,"log_metrics()"),OO.forEach(o),q3=a(ob,` The only difference is that raw unformatted numbers are saved in the current method.`),ob.forEach(o),Yr.forEach(o),P3=d(w),Et=s(w,"DIV",{class:!0});var Dd=i(Et);u(pn.$$.fragment,Dd),S3=d(Dd),mn=s(Dd,"P",{});var rb=i(mn);O3=a(rb,"Will save the model, so you can reload it using "),Cm=s(rb,"CODE",{});var CO=i(Cm);C3=a(CO,"from_pretrained()"),CO.forEach(o),I3=a(rb,"."),rb.forEach(o),U3=d(Dd),Im=s(Dd,"P",{});var IO=i(Im);N3=a(IO,"Will only save from the main process."),IO.forEach(o),Dd.forEach(o),z3=d(w),Tt=s(w,"DIV",{class:!0});var qd=i(Tt);u(hn.$$.fragment,qd),L3=d(qd),Um=s(qd,"P",{});var UO=i(Um);F3=a(UO,"Saves the Trainer state, since Trainer.save_model saves only the tokenizer with the model"),UO.forEach(o),R3=d(qd),Nm=s(qd,"P",{});var NO=i(Nm);W3=a(NO,"Under distributed environment this is done only for a process with rank 0."),NO.forEach(o),qd.forEach(o),G3=d(w),rr=s(w,"DIV",{class:!0});var ab=i(rr);u(un.$$.fragment,ab),j3=d(ab),zm=s(ab,"P",{});var zO=i(zm);M3=a(zO,"Main training entry point."),zO.forEach(o),ab.forEach(o),V3=d(w),$t=s(w,"DIV",{class:!0});var Pd=i($t);u(fn.$$.fragment,Pd),H3=d(Pd),Lm=s(Pd,"P",{});var LO=i(Lm);B3=a(LO,"Perform a training step on a batch of inputs."),LO.forEach(o),Y3=d(Pd),Fm=s(Pd,"P",{});var FO=i(Fm);Z3=a(FO,"Subclass and override to inject custom behavior."),FO.forEach(o),Pd.forEach(o),w.forEach(o),Lg=d(t),po=s(t,"H2",{class:!0});var nb=i(po);ar=s(nb,"A",{id:!0,class:!0,href:!0});var RO=i(ar);Rm=s(RO,"SPAN",{});var WO=i(Rm);u(gn.$$.fragment,WO),WO.forEach(o),RO.forEach(o),J3=d(nb),Wm=s(nb,"SPAN",{});var 
GO=i(Wm);X3=a(GO,"Seq2SeqTrainer"),GO.forEach(o),nb.forEach(o),Fg=d(t),tt=s(t,"DIV",{class:!0});var Sd=i(tt);u(_n.$$.fragment,Sd),K3=d(Sd),Ce=s(Sd,"DIV",{class:!0});var Zr=i(Ce);u(vn.$$.fragment,Zr),Q3=d(Zr),Gm=s(Zr,"P",{});var jO=i(Gm);e6=a(jO,"Run evaluation and returns metrics."),jO.forEach(o),t6=d(Zr),bn=s(Zr,"P",{});var sb=i(bn);o6=a(sb,`The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init `),jm=s(sb,"CODE",{});var MO=i(jm);r6=a(MO,"compute_metrics"),MO.forEach(o),a6=a(sb," argument)."),sb.forEach(o),n6=d(Zr),Mm=s(Zr,"P",{});var VO=i(Mm);s6=a(VO,"You can also subclass and override this method to inject custom behavior."),VO.forEach(o),Zr.forEach(o),i6=d(Sd),K=s(Sd,"DIV",{class:!0});var Ve=i(K);u(yn.$$.fragment,Ve),l6=d(Ve),Vm=s(Ve,"P",{});var HO=i(Vm);d6=a(HO,"Run prediction and returns predictions and potential metrics."),HO.forEach(o),c6=d(Ve),wn=s(Ve,"P",{});var ib=i(wn);p6=a(ib,`Depending on the dataset and your use case, your test dataset may contain labels. 
In that case, this method will also return metrics, like in `),Hm=s(ib,"CODE",{});var BO=i(Hm);m6=a(BO,"evaluate()"),BO.forEach(o),h6=a(ib,"."),ib.forEach(o),u6=d(Ve),u(nr.$$.fragment,Ve),f6=d(Ve),En=s(Ve,"P",{});var lb=i(En);g6=a(lb,"Returns: "),Bm=s(lb,"EM",{});var YO=i(Bm);_6=a(YO,"NamedTuple"),YO.forEach(o),v6=a(lb," A namedtuple with the following keys:"),lb.forEach(o),b6=d(Ve),mo=s(Ve,"UL",{});var Od=i(mo);ho=s(Od,"LI",{});var Cd=i(ho);y6=a(Cd,"predictions ("),Ym=s(Cd,"CODE",{});var ZO=i(Ym);w6=a(ZO,"np.ndarray"),ZO.forEach(o),E6=a(Cd,"): The predictions on "),Zm=s(Cd,"CODE",{});var JO=i(Zm);T6=a(JO,"test_dataset"),JO.forEach(o),$6=a(Cd,"."),Cd.forEach(o),k6=d(Od),uo=s(Od,"LI",{});var Id=i(uo);x6=a(Id,"label_ids ("),Jm=s(Id,"CODE",{});var XO=i(Jm);A6=a(XO,"np.ndarray"),XO.forEach(o),D6=a(Id,", "),Xm=s(Id,"EM",{});var KO=i(Xm);q6=a(KO,"optional"),KO.forEach(o),P6=a(Id,"): The labels (if the dataset contained some)."),Id.forEach(o),S6=d(Od),fo=s(Od,"LI",{});var Ud=i(fo);O6=a(Ud,"metrics ("),Km=s(Ud,"CODE",{});var QO=i(Km);C6=a(QO,"Dict[str, float]"),QO.forEach(o),I6=a(Ud,", "),Qm=s(Ud,"EM",{});var eC=i(Qm);U6=a(eC,"optional"),eC.forEach(o),N6=a(Ud,`): The potential dictionary of metrics (if the dataset contained labels).`),Ud.forEach(o),Od.forEach(o),Ve.forEach(o),Sd.forEach(o),Rg=d(t),go=s(t,"H2",{class:!0});var db=i(go);sr=s(db,"A",{id:!0,class:!0,href:!0});var tC=i(sr);eh=s(tC,"SPAN",{});var oC=i(eh);u(Tn.$$.fragment,oC),oC.forEach(o),tC.forEach(o),z6=d(db),th=s(db,"SPAN",{});var rC=i(th);L6=a(rC,"TrainingArguments"),rC.forEach(o),db.forEach(o),Wg=d(t),z=s(t,"DIV",{class:!0});var M=i(z);u($n.$$.fragment,M),F6=d(M),kn=s(M,"P",{});var cb=i(kn);R6=a(cb,"TrainingArguments is the subset of the arguments we use in our example scripts "),oh=s(cb,"STRONG",{});var aC=i(oh);W6=a(aC,`which relate to the training loop itself`),aC.forEach(o),G6=a(cb,"."),cb.forEach(o),j6=d(M),_o=s(M,"P",{});var Nd=i(_o);M6=a(Nd,"Using "),ki=s(Nd,"A",{href:!0});var 
nC=i(ki);V6=a(nC,"HfArgumentParser"),nC.forEach(o),H6=a(Nd,` we can turn this class into `),xn=s(Nd,"A",{href:!0,rel:!0});var sC=i(xn);B6=a(sC,"argparse"),sC.forEach(o),Y6=a(Nd,` arguments that can be specified on the command line.`),Nd.forEach(o),Z6=d(M),ce=s(M,"DIV",{class:!0});var Ht=i(ce);u(An.$$.fragment,Ht),J6=d(Ht),rh=s(Ht,"P",{});var iC=i(rh);X6=a(iC,`Returns the log level to be used depending on whether this process is the main process of node 0, main process of node non-0, or a non-main process.`),iC.forEach(o),K6=d(Ht),vo=s(Ht,"P",{});var zd=i(vo);Q6=a(zd,"For the main process the log level defaults to "),ah=s(zd,"CODE",{});var lC=i(ah);e$=a(lC,"logging.INFO"),lC.forEach(o),t$=a(zd," unless overridden by "),nh=s(zd,"CODE",{});var dC=i(nh);o$=a(dC,"log_level"),dC.forEach(o),r$=a(zd," argument."),zd.forEach(o),a$=d(Ht),bo=s(Ht,"P",{});var Ld=i(bo);n$=a(Ld,"For the replica processes the log level defaults to "),sh=s(Ld,"CODE",{});var cC=i(sh);s$=a(cC,"logging.WARNING"),cC.forEach(o),i$=a(Ld," unless overridden by "),ih=s(Ld,"CODE",{});var pC=i(ih);l$=a(pC,"log_level_replica"),pC.forEach(o),d$=a(Ld,` argument.`),Ld.forEach(o),c$=d(Ht),Dn=s(Ht,"P",{});var pb=i(Dn);p$=a(pb,"The choice between the main and replica process settings is made according to the return value of "),lh=s(pb,"CODE",{});var mC=i(lh);m$=a(mC,"should_log"),mC.forEach(o),h$=a(pb,"."),pb.forEach(o),Ht.forEach(o),u$=d(M),ir=s(M,"DIV",{class:!0});var mb=i(ir);u(qn.$$.fragment,mb),f$=d(mb),dh=s(mb,"P",{});var hC=i(dh);g$=a(hC,"Get number of steps used for a linear warmup."),hC.forEach(o),mb.forEach(o),_$=d(M),kt=s(M,"DIV",{class:!0});var Fd=i(kt);u(Pn.$$.fragment,Fd),v$=d(Fd),ch=s(Fd,"P",{});var uC=i(ch);b$=a(uC,`A context manager for torch distributed environment where on needs to do something on the main process, while blocking replicas, and when it\u2019s finished releasing the replicas.`),uC.forEach(o),y$=d(Fd),yo=s(Fd,"P",{});var Rd=i(yo);w$=a(Rd,"One such use is for 
"),ph=s(Rd,"CODE",{});var fC=i(ph);E$=a(fC,"datasets"),fC.forEach(o),T$=a(Rd,"\u2019s "),mh=s(Rd,"CODE",{});var gC=i(mh);$$=a(gC,"map"),gC.forEach(o),k$=a(Rd,` feature which to be efficient should be run once on the main process, which upon completion saves a cached version of results and which then automatically gets loaded by the replicas.`),Rd.forEach(o),Fd.forEach(o),x$=d(M),lr=s(M,"DIV",{class:!0});var hb=i(lr);u(Sn.$$.fragment,hb),A$=d(hb),On=s(hb,"P",{});var ub=i(On);D$=a(ub,"Serializes this instance while replace "),hh=s(ub,"CODE",{});var _C=i(hh);q$=a(_C,"Enum"),_C.forEach(o),P$=a(ub,` by their values (for JSON serialization support). It obfuscates the token values by removing their value.`),ub.forEach(o),hb.forEach(o),S$=d(M),dr=s(M,"DIV",{class:!0});var fb=i(dr);u(Cn.$$.fragment,fb),O$=d(fb),uh=s(fb,"P",{});var vC=i(uh);C$=a(vC,"Serializes this instance to a JSON string."),vC.forEach(o),fb.forEach(o),I$=d(M),cr=s(M,"DIV",{class:!0});var gb=i(cr);u(In.$$.fragment,gb),U$=d(gb),fh=s(gb,"P",{});var bC=i(fh);N$=a(bC,"Sanitized serialization to use with TensorBoard\u2019s hparams"),bC.forEach(o),gb.forEach(o),M.forEach(o),Gg=d(t),wo=s(t,"H2",{class:!0});var _b=i(wo);pr=s(_b,"A",{id:!0,class:!0,href:!0});var yC=i(pr);gh=s(yC,"SPAN",{});var wC=i(gh);u(Un.$$.fragment,wC),wC.forEach(o),yC.forEach(o),z$=d(_b),_h=s(_b,"SPAN",{});var EC=i(_h);L$=a(EC,"Seq2SeqTrainingArguments"),EC.forEach(o),_b.forEach(o),jg=d(t),ae=s(t,"DIV",{class:!0});var Bt=i(ae);u(Nn.$$.fragment,Bt),F$=d(Bt),zn=s(Bt,"P",{});var vb=i(zn);R$=a(vb,"TrainingArguments is the subset of the arguments we use in our example scripts "),vh=s(vb,"STRONG",{});var TC=i(vh);W$=a(TC,`which relate to the training loop itself`),TC.forEach(o),G$=a(vb,"."),vb.forEach(o),j$=d(Bt),Eo=s(Bt,"P",{});var Wd=i(Eo);M$=a(Wd,"Using "),xi=s(Wd,"A",{href:!0});var $C=i(xi);V$=a($C,"HfArgumentParser"),$C.forEach(o),H$=a(Wd,` we can turn this class into `),Ln=s(Wd,"A",{href:!0,rel:!0});var 
kC=i(Ln);B$=a(kC,"argparse"),kC.forEach(o),Y$=a(Wd,` arguments that can be specified on the command line.`),Wd.forEach(o),Z$=d(Bt),ne=s(Bt,"P",{});var He=i(ne);J$=a(He,"sortish_sampler ("),bh=s(He,"CODE",{});var xC=i(bh);X$=a(xC,"bool"),xC.forEach(o),K$=a(He,", "),yh=s(He,"EM",{});var AC=i(yh);Q$=a(AC,"optional"),AC.forEach(o),ek=a(He,", defaults to "),wh=s(He,"CODE",{});var DC=i(wh);tk=a(DC,"False"),DC.forEach(o),ok=a(He,`): Whether to use a `),Eh=s(He,"EM",{});var qC=i(Eh);rk=a(qC,"sortish sampler"),qC.forEach(o),ak=a(He," or not. Only possible if the underlying datasets are "),Th=s(He,"EM",{});var PC=i(Th);nk=a(PC,"Seq2SeqDataset"),PC.forEach(o),sk=a(He,` for now but will become generally available in the near future.`),He.forEach(o),ik=d(Bt),q=s(Bt,"P",{});var O=i(q);lk=a(O,`It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness for the training set. predict_with_generate (`),$h=s(O,"CODE",{});var SC=i($h);dk=a(SC,"bool"),SC.forEach(o),ck=a(O,", "),kh=s(O,"EM",{});var OC=i(kh);pk=a(OC,"optional"),OC.forEach(o),mk=a(O,", defaults to "),xh=s(O,"CODE",{});var CC=i(xh);hk=a(CC,"False"),CC.forEach(o),uk=a(O,`): Whether to use generate to calculate generative metrics (ROUGE, BLEU). generation_max_length (`),Ah=s(O,"CODE",{});var IC=i(Ah);fk=a(IC,"int"),IC.forEach(o),gk=a(O,", "),Dh=s(O,"EM",{});var UC=i(Dh);_k=a(UC,"optional"),UC.forEach(o),vk=a(O,`): The `),qh=s(O,"CODE",{});var NC=i(qh);bk=a(NC,"max_length"),NC.forEach(o),yk=a(O," to use on each evaluation loop when "),Ph=s(O,"CODE",{});var zC=i(Ph);wk=a(zC,"predict_with_generate=True"),zC.forEach(o),Ek=a(O,`. Will default to the `),Sh=s(O,"CODE",{});var LC=i(Sh);Tk=a(LC,"max_length"),LC.forEach(o),$k=a(O,` value of the model configuration. 
generation_num_beams (`),Oh=s(O,"CODE",{});var FC=i(Oh);kk=a(FC,"int"),FC.forEach(o),xk=a(O,", "),Ch=s(O,"EM",{});var RC=i(Ch);Ak=a(RC,"optional"),RC.forEach(o),Dk=a(O,`): The `),Ih=s(O,"CODE",{});var WC=i(Ih);qk=a(WC,"num_beams"),WC.forEach(o),Pk=a(O," to use on each evaluation loop when "),Uh=s(O,"CODE",{});var GC=i(Uh);Sk=a(GC,"predict_with_generate=True"),GC.forEach(o),Ok=a(O,`. Will default to the `),Nh=s(O,"CODE",{});var jC=i(Nh);Ck=a(jC,"num_beams"),jC.forEach(o),Ik=a(O," value of the model configuration."),O.forEach(o),Bt.forEach(o),Mg=d(t),To=s(t,"H2",{class:!0});var bb=i(To);mr=s(bb,"A",{id:!0,class:!0,href:!0});var MC=i(mr);zh=s(MC,"SPAN",{});var VC=i(zh);u(Fn.$$.fragment,VC),VC.forEach(o),MC.forEach(o),Uk=d(bb),Lh=s(bb,"SPAN",{});var HC=i(Lh);Nk=a(HC,"Checkpoints"),HC.forEach(o),bb.forEach(o),Vg=d(t),pe=s(t,"P",{});var Yt=i(pe);zk=a(Yt,"By default, "),Ai=s(Yt,"A",{href:!0});var BC=i(Ai);Lk=a(BC,"Trainer"),BC.forEach(o),Fk=a(Yt," will save all checkpoints in the "),Fh=s(Yt,"CODE",{});var YC=i(Fh);Rk=a(YC,"output_dir"),YC.forEach(o),Wk=a(Yt,` you set in the `),Di=s(Yt,"A",{href:!0});var ZC=i(Di);Gk=a(ZC,"TrainingArguments"),ZC.forEach(o),jk=a(Yt," you are using. 
Those will go in subfolder named "),Rh=s(Yt,"CODE",{});var JC=i(Rh);Mk=a(JC,"checkpoint-xxx"),JC.forEach(o),Vk=a(Yt,` with xxx being the step at which the training was at.`),Yt.forEach(o),Hg=d(t),hr=s(t,"P",{});var yb=i(hr);Hk=a(yb,"Resuming training from a checkpoint can be done when calling "),qi=s(yb,"A",{href:!0});var XC=i(qi);Bk=a(XC,"Trainer.train()"),XC.forEach(o),Yk=a(yb," with either:"),yb.forEach(o),Bg=d(t),ur=s(t,"UL",{});var wb=i(ur);Pi=s(wb,"LI",{});var Iq=i(Pi);Wh=s(Iq,"CODE",{});var KC=i(Wh);Zk=a(KC,"resume_from_checkpoint=True"),KC.forEach(o),Jk=a(Iq," which will resume training from the latest checkpoint"),Iq.forEach(o),Xk=d(wb),Si=s(wb,"LI",{});var Uq=i(Si);Gh=s(Uq,"CODE",{});var QC=i(Gh);Kk=a(QC,"resume_from_checkpoint=checkpoint_dir"),QC.forEach(o),Qk=a(Uq,` which will resume training from the specific checkpoint in the directory passed.`),Uq.forEach(o),wb.forEach(o),Yg=d(t),Ie=s(t,"P",{});var Jr=i(Ie);e5=a(Jr,"In addition, you can easily save your checkpoints on the Model Hub when using "),jh=s(Jr,"CODE",{});var eI=i(jh);t5=a(eI,"push_to_hub=True"),eI.forEach(o),o5=a(Jr,`. By default, all the models saved in intermediate checkpoints are saved in different commits, but not the optimizer state. 
You can adapt the `),Mh=s(Jr,"CODE",{});var tI=i(Mh);r5=a(tI,"hub-strategy"),tI.forEach(o),a5=a(Jr," value of your "),Oi=s(Jr,"A",{href:!0});var oI=i(Oi);n5=a(oI,"TrainingArguments"),oI.forEach(o),s5=a(Jr," to either:"),Jr.forEach(o),Zg=d(t),fr=s(t,"UL",{});var Eb=i(fr);gr=s(Eb,"LI",{});var Pg=i(gr);Vh=s(Pg,"CODE",{});var rI=i(Vh);i5=a(rI,'"checkpoint"'),rI.forEach(o),l5=a(Pg,`: the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with `),Hh=s(Pg,"CODE",{});var aI=i(Hh);d5=a(aI,'trainer.train(resume_from_checkpoint="output_dir/last-checkpoint")'),aI.forEach(o),c5=a(Pg,"."),Pg.forEach(o),p5=d(Eb),Ci=s(Eb,"LI",{});var Nq=i(Ci);Bh=s(Nq,"CODE",{});var nI=i(Bh);m5=a(nI,'"all_checkpoints"'),nI.forEach(o),h5=a(Nq,`: all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)`),Nq.forEach(o),Eb.forEach(o),Jg=d(t),$o=s(t,"H2",{class:!0});var Tb=i($o);_r=s(Tb,"A",{id:!0,class:!0,href:!0});var sI=i(_r);Yh=s(sI,"SPAN",{});var iI=i(Yh);u(Rn.$$.fragment,iI),iI.forEach(o),sI.forEach(o),u5=d(Tb),Zh=s(Tb,"SPAN",{});var lI=i(Zh);f5=a(lI,"Logging"),lI.forEach(o),Tb.forEach(o),Xg=d(t),Ue=s(t,"P",{});var Xr=i(Ue);g5=a(Xr,"By default "),Ii=s(Xr,"A",{href:!0});var dI=i(Ii);_5=a(dI,"Trainer"),dI.forEach(o),v5=a(Xr," will use "),Jh=s(Xr,"CODE",{});var cI=i(Jh);b5=a(cI,"logging.INFO"),cI.forEach(o),y5=a(Xr," for the main process and "),Xh=s(Xr,"CODE",{});var pI=i(Xh);w5=a(pI,"logging.WARNING"),pI.forEach(o),E5=a(Xr," for the replicas if any."),Xr.forEach(o),Kg=d(t),xt=s(t,"P",{});var Gd=i(xt);T5=a(Gd,"These defaults can be overridden to use any of the 5 "),Kh=s(Gd,"CODE",{});var mI=i(Kh);$5=a(mI,"logging"),mI.forEach(o),k5=a(Gd," levels with "),Ui=s(Gd,"A",{href:!0});var hI=i(Ui);x5=a(hI,"TrainingArguments"),hI.forEach(o),A5=a(Gd,`\u2019s arguments:`),Gd.forEach(o),Qg=d(t),vr=s(t,"UL",{});var $b=i(vr);Ni=s($b,"LI",{});var 
zq=i(Ni);Qh=s(zq,"CODE",{});var uI=i(Qh);D5=a(uI,"log_level"),uI.forEach(o),q5=a(zq," - for the main process"),zq.forEach(o),P5=d($b),zi=s($b,"LI",{});var Lq=i(zi);eu=s(Lq,"CODE",{});var fI=i(eu);S5=a(fI,"log_level_replica"),fI.forEach(o),O5=a(Lq," - for the replicas"),Lq.forEach(o),$b.forEach(o),e_=d(t),Ne=s(t,"P",{});var Kr=i(Ne);C5=a(Kr,"Further, if "),Li=s(Kr,"A",{href:!0});var gI=i(Li);I5=a(gI,"TrainingArguments"),gI.forEach(o),U5=a(Kr,"\u2019s "),tu=s(Kr,"CODE",{});var _I=i(tu);N5=a(_I,"log_on_each_node"),_I.forEach(o),z5=a(Kr," is set to "),ou=s(Kr,"CODE",{});var vI=i(ou);L5=a(vI,"False"),vI.forEach(o),F5=a(Kr,` only the main node will use the log level settings for its main process, all other nodes will use the log level settings for replicas.`),Kr.forEach(o),t_=d(t),Q=s(t,"P",{});var Be=i(Q);R5=a(Be,"Note that "),Fi=s(Be,"A",{href:!0});var bI=i(Fi);W5=a(bI,"Trainer"),bI.forEach(o),G5=a(Be," is going to set "),ru=s(Be,"CODE",{});var yI=i(ru);j5=a(yI,"transformers"),yI.forEach(o),M5=a(Be,`\u2019s log level separately for each node in its `),au=s(Be,"CODE",{});var wI=i(au);V5=a(wI,"Trainer.__init__()"),wI.forEach(o),H5=a(Be,` So you may want to set this sooner (see the next example) if you tap into other `),nu=s(Be,"CODE",{});var EI=i(nu);B5=a(EI,"transformers"),EI.forEach(o),Y5=a(Be," functionality before creating the "),Ri=s(Be,"A",{href:!0});var TI=i(Ri);Z5=a(TI,"Trainer"),TI.forEach(o),J5=a(Be," object."),Be.forEach(o),o_=d(t),Wi=s(t,"P",{});var $I=i(Wi);X5=a($I,"Here is an example of how this can be used in an application:"),$I.forEach(o),r_=d(t),u(Wn.$$.fragment,t),a_=d(t),Gi=s(t,"P",{});var kI=i(Gi);K5=a(kI,`And then if you only want to see warnings on the main node and all other nodes to not print any most likely duplicated warnings you could run it as:`),kI.forEach(o),n_=d(t),u(Gn.$$.fragment,t),s_=d(t),ji=s(t,"P",{});var xI=i(ji);Q5=a(xI,`In the multi-node environment if you also don\u2019t want the logs to repeat for each node\u2019s main process, 
you will want to change the above to:`),xI.forEach(o),i_=d(t),u(jn.$$.fragment,t),l_=d(t),Mi=s(t,"P",{});var AI=i(Mi);ex=a(AI,`and then only the main process of the first node will log at the \u201Cwarning\u201D level, and all other processes on the main node and all processes on other nodes will log at the \u201Cerror\u201D level.`),AI.forEach(o),d_=d(t),Vi=s(t,"P",{});var DI=i(Vi);tx=a(DI,"If you need your application to be as quiet as possible you could do:"),DI.forEach(o),c_=d(t),u(Mn.$$.fragment,t),p_=d(t),br=s(t,"P",{});var kb=i(br);ox=a(kb,"(add "),su=s(kb,"CODE",{});var qI=i(su);rx=a(qI,"--log_on_each_node 0"),qI.forEach(o),ax=a(kb," if on multi-node environment)"),kb.forEach(o),m_=d(t),ko=s(t,"H2",{class:!0});var xb=i(ko);yr=s(xb,"A",{id:!0,class:!0,href:!0});var PI=i(yr);iu=s(PI,"SPAN",{});var SI=i(iu);u(Vn.$$.fragment,SI),SI.forEach(o),PI.forEach(o),nx=d(xb),lu=s(xb,"SPAN",{});var OI=i(lu);sx=a(OI,"Randomness"),OI.forEach(o),xb.forEach(o),h_=d(t),me=s(t,"P",{});var Zt=i(me);ix=a(Zt,"When resuming from a checkpoint generated by "),Hi=s(Zt,"A",{href:!0});var CI=i(Hi);lx=a(CI,"Trainer"),CI.forEach(o),dx=a(Zt,` all efforts are made to restore the `),du=s(Zt,"EM",{});var II=i(du);cx=a(II,"python"),II.forEach(o),px=a(Zt,", "),cu=s(Zt,"EM",{});var UI=i(cu);mx=a(UI,"numpy"),UI.forEach(o),hx=a(Zt," and "),pu=s(Zt,"EM",{});var NI=i(pu);ux=a(NI,"pytorch"),NI.forEach(o),fx=a(Zt,` RNG states to the same states as they were at the moment of saving that checkpoint, which should make the \u201Cstop and resume\u201D style of training as close as possible to non-stop training.`),Zt.forEach(o),u_=d(t),At=s(t,"P",{});var jd=i(At);gx=a(jd,`However, due to various default non-deterministic pytorch settings this might not fully work. If you want full determinism please refer to `),Hn=s(jd,"A",{href:!0,rel:!0});var zI=i(Hn);_x=a(zI,"Controlling sources of randomness"),zI.forEach(o),vx=a(jd,`. 
As explained in the document, that some of those settings that make things deterministic (.e.g., `),mu=s(jd,"CODE",{});var LI=i(mu);bx=a(LI,"torch.backends.cudnn.deterministic"),LI.forEach(o),yx=a(jd,`) may slow things down, therefore this can\u2019t be done by default, but you can enable those yourself if needed.`),jd.forEach(o),f_=d(t),xo=s(t,"H2",{class:!0});var Ab=i(xo);wr=s(Ab,"A",{id:!0,class:!0,href:!0});var FI=i(wr);hu=s(FI,"SPAN",{});var RI=i(hu);u(Bn.$$.fragment,RI),RI.forEach(o),FI.forEach(o),wx=d(Ab),uu=s(Ab,"SPAN",{});var WI=i(uu);Ex=a(WI,"Specific GPUs Selection"),WI.forEach(o),Ab.forEach(o),g_=d(t),Bi=s(t,"P",{});var GI=i(Bi);Tx=a(GI,"Let\u2019s discuss how you can tell your program which GPUs are to be used and in what order."),GI.forEach(o),__=d(t),Er=s(t,"P",{});var Db=i(Er);$x=a(Db,"When using "),Yn=s(Db,"A",{href:!0,rel:!0});var jI=i(Yn);fu=s(jI,"CODE",{});var MI=i(fu);kx=a(MI,"DistributedDataParallel"),MI.forEach(o),jI.forEach(o),xx=a(Db," to use only a subset of your GPUs, you simply specify the number of GPUs to use. 
For example, if you have 4 GPUs, but you wish to use the first 2 you can do:"),Db.forEach(o),v_=d(t),u(Zn.$$.fragment,t),b_=d(t),Dt=s(t,"P",{});var Md=i(Dt);Ax=a(Md,"if you have either "),Jn=s(Md,"A",{href:!0,rel:!0});var VI=i(Jn);gu=s(VI,"CODE",{});var HI=i(gu);Dx=a(HI,"accelerate"),HI.forEach(o),VI.forEach(o),qx=a(Md," or "),Xn=s(Md,"A",{href:!0,rel:!0});var BI=i(Xn);_u=s(BI,"CODE",{});var YI=i(_u);Px=a(YI,"deepspeed"),YI.forEach(o),BI.forEach(o),Sx=a(Md," installed you can also accomplish the same by using one of:"),Md.forEach(o),y_=d(t),u(Kn.$$.fragment,t),w_=d(t),u(Qn.$$.fragment,t),E_=d(t),Tr=s(t,"P",{});var qb=i(Tr);Ox=a(qb,"You don\u2019t need to use the Accelerate or "),Yi=s(qb,"A",{href:!0});var ZI=i(Yi);Cx=a(ZI,"the Deepspeed integration"),ZI.forEach(o),Ix=a(qb," features to use these launchers."),qb.forEach(o),T_=d(t),Zi=s(t,"P",{});var JI=i(Zi);Ux=a(JI,"Until now you were able to tell the program how many GPUs to use. Now let\u2019s discuss how to select specific GPUs and control their order."),JI.forEach(o),$_=d(t),Ji=s(t,"P",{});var XI=i(Ji);Nx=a(XI,"The following environment variables help you control which GPUs to use and their order."),XI.forEach(o),k_=d(t),Xi=s(t,"P",{});var KI=i(Xi);vu=s(KI,"STRONG",{});var QI=i(vu);bu=s(QI,"CODE",{});var eU=i(bu);zx=a(eU,"CUDA_VISIBLE_DEVICES"),eU.forEach(o),QI.forEach(o),KI.forEach(o),x_=d(t),$r=s(t,"P",{});var Pb=i($r);Lx=a(Pb,"If you have multiple GPUs and you\u2019d like to use only 1 or a few of those GPUs, set the environment variable "),yu=s(Pb,"CODE",{});var tU=i(yu);Fx=a(tU,"CUDA_VISIBLE_DEVICES"),tU.forEach(o),Rx=a(Pb," to a list of the GPUs to be used."),Pb.forEach(o),A_=d(t),Ki=s(t,"P",{});var oU=i(Ki);Wx=a(oU,"For example, let\u2019s say you have 4 GPUs: 0, 1, 2 and 3. 
To run only on the physical GPUs 0 and 2, you can do:"),oU.forEach(o),D_=d(t),u(es.$$.fragment,t),q_=d(t),qt=s(t,"P",{});var Vd=i(qt);Gx=a(Vd,"So now pytorch will see only 2 GPUs, where your physical GPUs 0 and 2 are mapped to "),wu=s(Vd,"CODE",{});var rU=i(wu);jx=a(rU,"cuda:0"),rU.forEach(o),Mx=a(Vd," and "),Eu=s(Vd,"CODE",{});var aU=i(Eu);Vx=a(aU,"cuda:1"),aU.forEach(o),Hx=a(Vd," correspondingly."),Vd.forEach(o),P_=d(t),Qi=s(t,"P",{});var nU=i(Qi);Bx=a(nU,"You can even change their order:"),nU.forEach(o),S_=d(t),u(ts.$$.fragment,t),O_=d(t),Pt=s(t,"P",{});var Hd=i(Pt);Yx=a(Hd,"Here your physical GPUs 0 and 2 are mapped to "),Tu=s(Hd,"CODE",{});var sU=i(Tu);Zx=a(sU,"cuda:1"),sU.forEach(o),Jx=a(Hd," and "),$u=s(Hd,"CODE",{});var iU=i($u);Xx=a(iU,"cuda:0"),iU.forEach(o),Kx=a(Hd," correspondingly."),Hd.forEach(o),C_=d(t),St=s(t,"P",{});var Bd=i(St);Qx=a(Bd,"The above examples were all for "),ku=s(Bd,"CODE",{});var lU=i(ku);eA=a(lU,"DistributedDataParallel"),lU.forEach(o),tA=a(Bd," use pattern, but the same method works for "),os=s(Bd,"A",{href:!0,rel:!0});var dU=i(os);xu=s(dU,"CODE",{});var cU=i(xu);oA=a(cU,"DataParallel"),cU.forEach(o),dU.forEach(o),rA=a(Bd," as well:"),Bd.forEach(o),I_=d(t),u(rs.$$.fragment,t),U_=d(t),el=s(t,"P",{});var pU=i(el);aA=a(pU,"To emulate an environment without GPUs simply set this environment variable to an empty value like so:"),pU.forEach(o),N_=d(t),u(as.$$.fragment,t),z_=d(t),tl=s(t,"P",{});var mU=i(tl);nA=a(mU,"As with any environment variable you can, of course, export those instead of adding these to the command line, as in:"),mU.forEach(o),L_=d(t),u(ns.$$.fragment,t),F_=d(t),ol=s(t,"P",{});var hU=i(ol);sA=a(hU,"but this approach can be confusing since you may forget you set up the environment variable earlier and not understand why the wrong GPUs are used. 
Therefore, it\u2019s a common practice to set the environment variable just for a specific run on the same command line as it\u2019s shown in most examples of this section."),hU.forEach(o),R_=d(t),rl=s(t,"P",{});var uU=i(rl);Au=s(uU,"STRONG",{});var fU=i(Au);Du=s(fU,"CODE",{});var gU=i(Du);iA=a(gU,"CUDA_DEVICE_ORDER"),gU.forEach(o),fU.forEach(o),uU.forEach(o),W_=d(t),kr=s(t,"P",{});var Sb=i(kr);lA=a(Sb,"There is an additional environment variable "),qu=s(Sb,"CODE",{});var _U=i(qu);dA=a(_U,"CUDA_DEVICE_ORDER"),_U.forEach(o),cA=a(Sb," that controls how the physical devices are ordered. The two choices are:"),Sb.forEach(o),G_=d(t),al=s(t,"OL",{});var vU=i(al);ss=s(vU,"LI",{});var Ob=i(ss);pA=a(Ob,"ordered by PCIe bus IDs (matches "),Pu=s(Ob,"CODE",{});var bU=i(Pu);mA=a(bU,"nvidia-smi"),bU.forEach(o),hA=a(Ob,"\u2019s order) - this is the default."),Ob.forEach(o),vU.forEach(o),j_=d(t),u(is.$$.fragment,t),M_=d(t),ls=s(t,"OL",{start:!0});var yU=i(ls);Su=s(yU,"LI",{});var wU=i(Su);uA=a(wU,"ordered by GPU compute capabilities"),wU.forEach(o),yU.forEach(o),V_=d(t),u(ds.$$.fragment,t),H_=d(t),Ot=s(t,"P",{});var Yd=i(Ot);fA=a(Yd,"Most of the time you don\u2019t need to care about this environment variable, but it\u2019s very helpful if you have a lopsided setup where you have an old and a new GPUs physically inserted in such a way so that the slow older card appears to be first. One way to fix that is to swap the cards. But if you can\u2019t swap the cards (e.g., if the cooling of the devices gets impacted) then setting "),Ou=s(Yd,"CODE",{});var EU=i(Ou);gA=a(EU,"CUDA_DEVICE_ORDER=FASTEST_FIRST"),EU.forEach(o),_A=a(Yd," will always put the newer faster card first. 
It\u2019ll be somewhat confusing though since "),Cu=s(Yd,"CODE",{});var TU=i(Cu);vA=a(TU,"nvidia-smi"),TU.forEach(o),bA=a(Yd," will still report them in the PCIe order."),Yd.forEach(o),B_=d(t),nl=s(t,"P",{});var $U=i(nl);yA=a($U,"The other solution to swapping the order is to use:"),$U.forEach(o),Y_=d(t),u(cs.$$.fragment,t),Z_=d(t),sl=s(t,"P",{});var kU=i(sl);wA=a(kU,"In this example we are working with just 2 GPUs, but of course the same would apply to as many GPUs as your computer has."),kU.forEach(o),J_=d(t),xr=s(t,"P",{});var Cb=i(xr);EA=a(Cb,"Also if you do set this environment variable it\u2019s the best to set it in your "),Iu=s(Cb,"CODE",{});var xU=i(Iu);TA=a(xU,"~/.bashrc"),xU.forEach(o),$A=a(Cb," file or some other startup config file and forget about it."),Cb.forEach(o),X_=d(t),Ao=s(t,"H2",{class:!0});var Ib=i(Ao);Ar=s(Ib,"A",{id:!0,class:!0,href:!0});var AU=i(Ar);Uu=s(AU,"SPAN",{});var DU=i(Uu);u(ps.$$.fragment,DU),DU.forEach(o),AU.forEach(o),kA=d(Ib),Nu=s(Ib,"SPAN",{});var qU=i(Nu);xA=a(qU,"Trainer Integrations"),qU.forEach(o),Ib.forEach(o),K_=d(t),Dr=s(t,"P",{});var Ub=i(Dr);AA=a(Ub,"The "),il=s(Ub,"A",{href:!0});var PU=i(il);DA=a(PU,"Trainer"),PU.forEach(o),qA=a(Ub,` has been extended to support libraries that may dramatically improve your training time and fit much bigger models.`),Ub.forEach(o),Q_=d(t),ze=s(t,"P",{});var Qr=i(ze);PA=a(Qr,"Currently it supports third party solutions, "),ms=s(Qr,"A",{href:!0,rel:!0});var SU=i(ms);SA=a(SU,"DeepSpeed"),SU.forEach(o),OA=a(Qr," and "),hs=s(Qr,"A",{href:!0,rel:!0});var OU=i(hs);CA=a(OU,"FairScale"),OU.forEach(o),IA=a(Qr,", which implement parts of the paper "),us=s(Qr,"A",{href:!0,rel:!0});var CU=i(us);UA=a(CU,`ZeRO: Memory Optimizations Toward Training Trillion Parameter Models, by Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, Yuxiong He`),CU.forEach(o),NA=a(Qr,"."),Qr.forEach(o),ev=d(t),ll=s(t,"P",{});var IU=i(ll);zA=a(IU,"This provided support is new and experimental as of this 
writing."),IU.forEach(o),tv=d(t),dl=s(t,"A",{id:!0}),i(dl).forEach(o),ov=d(t),Do=s(t,"H3",{class:!0});var Nb=i(Do);qr=s(Nb,"A",{id:!0,class:!0,href:!0});var UU=i(qr);zu=s(UU,"SPAN",{});var NU=i(zu);u(fs.$$.fragment,NU),NU.forEach(o),UU.forEach(o),LA=d(Nb),Lu=s(Nb,"SPAN",{});var zU=i(Lu);FA=a(zU,"CUDA Extension Installation Notes"),zU.forEach(o),Nb.forEach(o),rv=d(t),cl=s(t,"P",{});var LU=i(cl);RA=a(LU,"As of this writing, both FairScale and Deepspeed require compilation of CUDA C++ code, before they can be used."),LU.forEach(o),av=d(t),Ct=s(t,"P",{});var Zd=i(Ct);WA=a(Zd,"While all installation issues should be dealt with through the corresponding GitHub Issues of "),gs=s(Zd,"A",{href:!0,rel:!0});var FU=i(gs);GA=a(FU,"FairScale"),FU.forEach(o),jA=a(Zd," and "),_s=s(Zd,"A",{href:!0,rel:!0});var RU=i(_s);MA=a(RU,"Deepspeed"),RU.forEach(o),VA=a(Zd,`, there are a few common issues that one may encounter while building any PyTorch extension that needs to build CUDA extensions.`),Zd.forEach(o),nv=d(t),pl=s(t,"P",{});var WU=i(pl);HA=a(WU,"Therefore, if you encounter a CUDA-related build issue while doing one of the following or both:"),WU.forEach(o),sv=d(t),u(vs.$$.fragment,t),iv=d(t),ml=s(t,"P",{});var GU=i(ml);BA=a(GU,"please, read the following notes first."),GU.forEach(o),lv=d(t),It=s(t,"P",{});var Jd=i(It);YA=a(Jd,"In these notes we give examples for what to do when "),Fu=s(Jd,"CODE",{});var jU=i(Fu);ZA=a(jU,"pytorch"),jU.forEach(o),JA=a(Jd," has been built with CUDA "),Ru=s(Jd,"CODE",{});var MU=i(Ru);XA=a(MU,"10.2"),MU.forEach(o),KA=a(Jd,`. 
If your situation is different remember to adjust the version number to the one you are after.`),Jd.forEach(o),dv=d(t),qo=s(t,"H4",{class:!0});var zb=i(qo);Pr=s(zb,"A",{id:!0,class:!0,href:!0});var VU=i(Pr);Wu=s(VU,"SPAN",{});var HU=i(Wu);u(bs.$$.fragment,HU),HU.forEach(o),VU.forEach(o),QA=d(zb),Gu=s(zb,"SPAN",{});var BU=i(Gu);e9=a(BU,"Possible problem #1"),BU.forEach(o),zb.forEach(o),cv=d(t),hl=s(t,"P",{});var YU=i(hl);t9=a(YU,`While, Pytorch comes with its own CUDA toolkit, to build these two projects you must have an identical version of CUDA installed system-wide.`),YU.forEach(o),pv=d(t),Le=s(t,"P",{});var ea=i(Le);o9=a(ea,"For example, if you installed "),ju=s(ea,"CODE",{});var ZU=i(ju);r9=a(ZU,"pytorch"),ZU.forEach(o),a9=a(ea," with "),Mu=s(ea,"CODE",{});var JU=i(Mu);n9=a(JU,"cudatoolkit==10.2"),JU.forEach(o),s9=a(ea,` in the Python environment, you also need to have CUDA `),Vu=s(ea,"CODE",{});var XU=i(Vu);i9=a(XU,"10.2"),XU.forEach(o),l9=a(ea," installed system-wide."),ea.forEach(o),mv=d(t),Ut=s(t,"P",{});var Xd=i(Ut);d9=a(Xd,"The exact location may vary from system to system, but "),Hu=s(Xd,"CODE",{});var KU=i(Hu);c9=a(KU,"/usr/local/cuda-10.2"),KU.forEach(o),p9=a(Xd,` is the most common location on many Unix systems. When CUDA is correctly set up and added to the `),Bu=s(Xd,"CODE",{});var QU=i(Bu);m9=a(QU,"PATH"),QU.forEach(o),h9=a(Xd,` environment variable, one can find the installation location by doing:`),Xd.forEach(o),hv=d(t),u(ys.$$.fragment,t),uv=d(t),Sr=s(t,"P",{});var Lb=i(Sr);u9=a(Lb,`If you don\u2019t have CUDA installed system-wide, install it first. You will find the instructions by using your favorite search engine. 
For example, if you\u2019re on Ubuntu you may want to search for: `),ws=s(Lb,"A",{href:!0,rel:!0});var eN=i(ws);f9=a(eN,"ubuntu cuda 10.2 install"),eN.forEach(o),g9=a(Lb,"."),Lb.forEach(o),fv=d(t),Po=s(t,"H4",{class:!0});var Fb=i(Po);Or=s(Fb,"A",{id:!0,class:!0,href:!0});var tN=i(Or);Yu=s(tN,"SPAN",{});var oN=i(Yu);u(Es.$$.fragment,oN),oN.forEach(o),tN.forEach(o),_9=d(Fb),Zu=s(Fb,"SPAN",{});var rN=i(Zu);v9=a(rN,"Possible problem #2"),rN.forEach(o),Fb.forEach(o),gv=d(t),ul=s(t,"P",{});var aN=i(ul);b9=a(aN,`Another possible common problem is that you may have more than one CUDA toolkit installed system-wide. For example you may have:`),aN.forEach(o),_v=d(t),u(Ts.$$.fragment,t),vv=d(t),Nt=s(t,"P",{});var Kd=i(Nt);y9=a(Kd,"Now, in this situation you need to make sure that your "),Ju=s(Kd,"CODE",{});var nN=i(Ju);w9=a(nN,"PATH"),nN.forEach(o),E9=a(Kd," and "),Xu=s(Kd,"CODE",{});var sN=i(Xu);T9=a(sN,"LD_LIBRARY_PATH"),sN.forEach(o),$9=a(Kd,` environment variables contain the correct paths to the desired CUDA version. Typically, package installers will set these to contain whatever the last version was installed. 
If you encounter the problem, where the package build fails because it can\u2019t find the right CUDA version despite you having it installed system-wide, it means that you need to adjust the 2 aforementioned environment variables.`),Kd.forEach(o),bv=d(t),fl=s(t,"P",{});var iN=i(fl);k9=a(iN,"First, you may look at their contents:"),iN.forEach(o),yv=d(t),u($s.$$.fragment,t),wv=d(t),gl=s(t,"P",{});var lN=i(gl);x9=a(lN,"so you get an idea of what is inside."),lN.forEach(o),Ev=d(t),Cr=s(t,"P",{});var Rb=i(Cr);A9=a(Rb,"It\u2019s possible that "),Ku=s(Rb,"CODE",{});var dN=i(Ku);D9=a(dN,"LD_LIBRARY_PATH"),dN.forEach(o),q9=a(Rb," is empty."),Rb.forEach(o),Tv=d(t),ot=s(t,"P",{});var Ks=i(ot);Qu=s(Ks,"CODE",{});var cN=i(Qu);P9=a(cN,"PATH"),cN.forEach(o),S9=a(Ks," lists the locations of where executables can be found and "),ef=s(Ks,"CODE",{});var pN=i(ef);O9=a(pN,"LD_LIBRARY_PATH"),pN.forEach(o),C9=a(Ks,` is for where shared libraries are to looked for. In both cases, earlier entries have priority over the later ones. `),tf=s(Ks,"CODE",{});var mN=i(tf);I9=a(mN,":"),mN.forEach(o),U9=a(Ks,` is used to separate multiple entries.`),Ks.forEach(o),$v=d(t),_l=s(t,"P",{});var hN=i(_l);N9=a(hN,`Now, to tell the build program where to find the specific CUDA toolkit, insert the desired paths to be listed first by doing:`),hN.forEach(o),kv=d(t),u(ks.$$.fragment,t),xv=d(t),vl=s(t,"P",{});var uN=i(vl);z9=a(uN,"Note that we aren\u2019t overwriting the existing values, but prepending instead."),uN.forEach(o),Av=d(t),Fe=s(t,"P",{});var ta=i(Fe);L9=a(ta,`Of course, adjust the version number, the full path if need be. Check that the directories you assign actually do exist. 
`),of=s(ta,"CODE",{});var fN=i(of);F9=a(fN,"lib64"),fN.forEach(o),R9=a(ta," sub-directory is where the various CUDA "),rf=s(ta,"CODE",{});var gN=i(rf);W9=a(gN,".so"),gN.forEach(o),G9=a(ta," objects, like "),af=s(ta,"CODE",{});var _N=i(af);j9=a(_N,"libcudart.so"),_N.forEach(o),M9=a(ta,` reside, it\u2019s unlikely that your system will have it named differently, but if it is adjust it to reflect your reality.`),ta.forEach(o),Dv=d(t),So=s(t,"H4",{class:!0});var Wb=i(So);Ir=s(Wb,"A",{id:!0,class:!0,href:!0});var vN=i(Ir);nf=s(vN,"SPAN",{});var bN=i(nf);u(xs.$$.fragment,bN),bN.forEach(o),vN.forEach(o),V9=d(Wb),sf=s(Wb,"SPAN",{});var yN=i(sf);H9=a(yN,"Possible problem #3"),yN.forEach(o),Wb.forEach(o),qv=d(t),zt=s(t,"P",{});var Qd=i(zt);B9=a(Qd,"Some older CUDA versions may refuse to build with newer compilers. For example, you my have "),lf=s(Qd,"CODE",{});var wN=i(lf);Y9=a(wN,"gcc-9"),wN.forEach(o),Z9=a(Qd,` but it wants `),df=s(Qd,"CODE",{});var EN=i(df);J9=a(EN,"gcc-7"),EN.forEach(o),X9=a(Qd,"."),Qd.forEach(o),Pv=d(t),bl=s(t,"P",{});var TN=i(bl);K9=a(TN,"There are various ways to go about it."),TN.forEach(o),Sv=d(t),yl=s(t,"P",{});var $N=i(yl);Q9=a($N,"If you can install the latest CUDA toolkit it typically should support the newer compiler."),$N.forEach(o),Ov=d(t),Ur=s(t,"P",{});var Gb=i(Ur);e8=a(Gb,`Alternatively, you could install the lower version of the compiler in addition to the one you already have, or you may already have it but it\u2019s not the default one, so the build system can\u2019t see it. 
If you have `),cf=s(Gb,"CODE",{});var kN=i(cf);t8=a(kN,"gcc-7"),kN.forEach(o),o8=a(Gb,` installed but the build system complains it can\u2019t find it, the following might do the trick:`),Gb.forEach(o),Cv=d(t),u(As.$$.fragment,t),Iv=d(t),j=s(t,"P",{});var ue=i(j);r8=a(ue,"Here, we are making a symlink to "),pf=s(ue,"CODE",{});var xN=i(pf);a8=a(xN,"gcc-7"),xN.forEach(o),n8=a(ue," from "),mf=s(ue,"CODE",{});var AN=i(mf);s8=a(AN,"/usr/local/cuda-10.2/bin/gcc"),AN.forEach(o),i8=a(ue,` and since `),hf=s(ue,"CODE",{});var DN=i(hf);l8=a(DN,"/usr/local/cuda-10.2/bin/"),DN.forEach(o),d8=a(ue," should be in the "),uf=s(ue,"CODE",{});var qN=i(uf);c8=a(qN,"PATH"),qN.forEach(o),p8=a(ue,` environment variable (see the previous problem\u2019s solution), it should find `),ff=s(ue,"CODE",{});var PN=i(ff);m8=a(PN,"gcc-7"),PN.forEach(o),h8=a(ue," (and "),gf=s(ue,"CODE",{});var SN=i(gf);u8=a(SN,"g++7"),SN.forEach(o),f8=a(ue,") and then the build will succeed."),ue.forEach(o),Uv=d(t),wl=s(t,"P",{});var ON=i(wl);g8=a(ON,"As always make sure to edit the paths in the example to match your situation."),ON.forEach(o),Nv=d(t),Oo=s(t,"H3",{class:!0});var jb=i(Oo);Nr=s(jb,"A",{id:!0,class:!0,href:!0});var CN=i(Nr);_f=s(CN,"SPAN",{});var IN=i(_f);u(Ds.$$.fragment,IN),IN.forEach(o),CN.forEach(o),_8=d(jb),vf=s(jb,"SPAN",{});var UN=i(vf);v8=a(UN,"FairScale"),UN.forEach(o),jb.forEach(o),zv=d(t),Re=s(t,"P",{});var oa=i(Re);b8=a(oa,"By integrating "),qs=s(oa,"A",{href:!0,rel:!0});var NN=i(qs);y8=a(NN,"FairScale"),NN.forEach(o),w8=a(oa," the "),El=s(oa,"A",{href:!0});var zN=i(El);E8=a(zN,"Trainer"),zN.forEach(o),T8=a(oa,` provides support for the following features from `),Ps=s(oa,"A",{href:!0,rel:!0});var LN=i(Ps);$8=a(LN,"the ZeRO paper"),LN.forEach(o),k8=a(oa,":"),oa.forEach(o),Lv=d(t),We=s(t,"OL",{});var ra=i(We);bf=s(ra,"LI",{});var FN=i(bf);x8=a(FN,"Optimizer State Sharding"),FN.forEach(o),A8=d(ra),yf=s(ra,"LI",{});var RN=i(yf);D8=a(RN,"Gradient 
Sharding"),RN.forEach(o),q8=d(ra),wf=s(ra,"LI",{});var WN=i(wf);P8=a(WN,"Model Parameters Sharding (new and very experimental)"),WN.forEach(o),S8=d(ra),Ef=s(ra,"LI",{});var GN=i(Ef);O8=a(GN,"CPU offload (new and very experimental)"),GN.forEach(o),ra.forEach(o),Fv=d(t),Tl=s(t,"P",{});var jN=i(Tl);C8=a(jN,"You will need at least two GPUs to use this feature."),jN.forEach(o),Rv=d(t),Ss=s(t,"P",{});var Fq=i(Ss);Tf=s(Fq,"STRONG",{});var MN=i(Tf);I8=a(MN,"Installation"),MN.forEach(o),U8=a(Fq,":"),Fq.forEach(o),Wv=d(t),$l=s(t,"P",{});var VN=i($l);N8=a(VN,"Install the library via pypi:"),VN.forEach(o),Gv=d(t),u(Os.$$.fragment,t),jv=d(t),Lt=s(t,"P",{});var ec=i(Lt);z8=a(ec,"or via "),$f=s(ec,"CODE",{});var HN=i($f);L8=a(HN,"transformers"),HN.forEach(o),F8=a(ec,"\u2019 "),kf=s(ec,"CODE",{});var BN=i(kf);R8=a(BN,"extras"),BN.forEach(o),W8=a(ec,":"),ec.forEach(o),Mv=d(t),u(Cs.$$.fragment,t),Vv=d(t),Ft=s(t,"P",{});var tc=i(Ft);G8=a(tc,"(available starting from "),xf=s(tc,"CODE",{});var YN=i(xf);j8=a(YN,"transformers==4.6.0"),YN.forEach(o),M8=a(tc,") or find more details on "),Is=s(tc,"A",{href:!0,rel:!0});var ZN=i(Is);V8=a(ZN,"the FairScale\u2019s GitHub page"),ZN.forEach(o),H8=a(tc,"."),tc.forEach(o),Hv=d(t),zr=s(t,"P",{});var Mb=i(zr);B8=a(Mb,"If you\u2019re still struggling with the build, first make sure to read "),kl=s(Mb,"A",{href:!0});var JN=i(kl);Y8=a(JN,"CUDA Extension Installation Notes"),JN.forEach(o),Z8=a(Mb,"."),Mb.forEach(o),Bv=d(t),xl=s(t,"P",{});var XN=i(xl);J8=a(XN,"If it\u2019s still not resolved the build issue, here are a few more ideas."),XN.forEach(o),Yv=d(t),Us=s(t,"P",{});var Rq=i(Us);Af=s(Rq,"CODE",{});var KN=i(Af);X8=a(KN,"fairscale"),KN.forEach(o),K8=a(Rq,` seems to have an issue with the recently introduced by pip build isolation feature. 
If you have a problem with it, you may want to try one of:`),Rq.forEach(o),Zv=d(t),u(Ns.$$.fragment,t),Jv=d(t),Al=s(t,"P",{});var QN=i(Al);Q8=a(QN,"or:"),QN.forEach(o),Xv=d(t),u(zs.$$.fragment,t),Kv=d(t),Ls=s(t,"P",{});var Wq=i(Ls);Df=s(Wq,"CODE",{});var ez=i(Df);eD=a(ez,"fairscale"),ez.forEach(o),tD=a(Wq," also has issues with building against pytorch-nightly, so if you use it you may have to try one of:"),Wq.forEach(o),Qv=d(t),u(Fs.$$.fragment,t),e1=d(t),Dl=s(t,"P",{});var tz=i(Dl);oD=a(tz,"or:"),tz.forEach(o),t1=d(t),u(Rs.$$.fragment,t),o1=d(t),ql=s(t,"P",{});var oz=i(ql);rD=a(oz,"Of course, adjust the urls to match the cuda version you use."),oz.forEach(o),r1=d(t),Lr=s(t,"P",{});var Vb=i(Lr);aD=a(Vb,`If after trying everything suggested you still encounter build issues, please, proceed with the GitHub Issue of `),Ws=s(Vb,"A",{href:!0,rel:!0});var rz=i(Ws);nD=a(rz,"FairScale"),rz.forEach(o),sD=a(Vb,"."),Vb.forEach(o),a1=d(t),Gs=s(t,"P",{});var Gq=i(Gs);qf=s(Gq,"STRONG",{});var az=i(qf);iD=a(az,"Usage"),az.forEach(o),lD=a(Gq,":"),Gq.forEach(o),n1=d(t),Rt=s(t,"P",{});var oc=i(Rt);dD=a(oc,"To use the first version of Sharded data-parallelism, add "),Pf=s(oc,"CODE",{});var nz=i(Pf);cD=a(nz,"--sharded_ddp simple"),nz.forEach(o),pD=a(oc,` to the command line arguments, and make sure you have added the distributed launcher `),Sf=s(oc,"CODE",{});var sz=i(Sf);mD=a(sz,"-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE"),sz.forEach(o),hD=a(oc," if you haven\u2019t been using it already."),oc.forEach(o),s1=d(t),Fr=s(t,"P",{});var Hb=i(Fr);uD=a(Hb,"For example here is how you could use it for "),Of=s(Hb,"CODE",{});var iz=i(Of);fD=a(iz,"run_translation.py"),iz.forEach(o),gD=a(Hb," with 2 GPUs:"),Hb.forEach(o),i1=d(t),u(js.$$.fragment,t),l1=d(t),Pl=s(t,"P",{});var lz=i(Pl);_D=a(lz,"Notes:"),lz.forEach(o),d1=d(t),Ge=s(t,"UL",{});var aa=i(Ge);Cf=s(aa,"LI",{});var dz=i(Cf);vD=a(dz,"This feature requires distributed training (so multiple 
GPUs)."),dz.forEach(o),bD=d(aa),If=s(aa,"LI",{});var cz=i(If);yD=a(cz,"It is not implemented for TPUs."),cz.forEach(o),wD=d(aa),Ms=s(aa,"LI",{});var Bb=i(Ms);ED=a(Bb,"It works with "),Uf=s(Bb,"CODE",{});var pz=i(Uf);TD=a(pz,"--fp16"),pz.forEach(o),$D=a(Bb," too, to make things even faster."),Bb.forEach(o),kD=d(aa),Vs=s(aa,"LI",{});var Yb=i(Vs);xD=a(Yb,"One of the main benefits of enabling "),Nf=s(Yb,"CODE",{});var mz=i(Nf);AD=a(mz,"--sharded_ddp simple"),mz.forEach(o),DD=a(Yb,` is that it uses a lot less GPU memory, so you should be able to use significantly larger batch sizes using the same hardware (e.g. 3x and even bigger) which should lead to significantly shorter training time.`),Yb.forEach(o),aa.forEach(o),c1=d(t),Hs=s(t,"OL",{start:!0});var hz=i(Hs);rt=s(hz,"LI",{});var na=i(rt);qD=a(na,"To use the second version of Sharded data-parallelism, add "),zf=s(na,"CODE",{});var uz=i(zf);PD=a(uz,"--sharded_ddp zero_dp_2"),uz.forEach(o),SD=a(na," or "),Lf=s(na,"CODE",{});var fz=i(Lf);OD=a(fz,"--sharded_ddp zero_dp_3"),fz.forEach(o),CD=a(na," to the command line arguments, and make sure you have added the distributed launcher "),Ff=s(na,"CODE",{});var gz=i(Ff);ID=a(gz,"-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE"),gz.forEach(o),UD=a(na," if you haven\u2019t been using it already."),na.forEach(o),hz.forEach(o),p1=d(t),Rr=s(t,"P",{});var Zb=i(Rr);ND=a(Zb,"For example here is how you could use it for "),Rf=s(Zb,"CODE",{});var _z=i(Rf);zD=a(_z,"run_translation.py"),_z.forEach(o),LD=a(Zb," with 2 GPUs:"),Zb.forEach(o),m1=d(t),u(Bs.$$.fragment,t),h1=d(t),Co=s(t,"P",{});var Sg=i(Co);Wf=s(Sg,"CODE",{});var vz=i(Wf);FD=a(vz,"zero_dp_2"),vz.forEach(o),RD=a(Sg," is an optimized version of the simple wrapper, while "),Gf=s(Sg,"CODE",{});var bz=i(Gf);WD=a(bz,"zero_dp_3"),bz.forEach(o),GD=a(Sg,` fully shards model weights, gradients and optimizer states.`),Sg.forEach(o),u1=d(t),Wt=s(t,"P",{});var rc=i(Wt);jD=a(rc,"Both are compatible with adding 
"),jf=s(rc,"CODE",{});var yz=i(jf);MD=a(yz,"cpu_offload"),yz.forEach(o),VD=a(rc," to enable ZeRO-offload (activate it like this: "),Mf=s(rc,"CODE",{});var wz=i(Mf);HD=a(wz,'--sharded_ddp "zero_dp_2 cpu_offload"'),wz.forEach(o),BD=a(rc,")."),rc.forEach(o),f1=d(t),Sl=s(t,"P",{});var Ez=i(Sl);YD=a(Ez,"Notes:"),Ez.forEach(o),g1=d(t),he=s(t,"UL",{});var Jt=i(he);Vf=s(Jt,"LI",{});var Tz=i(Vf);ZD=a(Tz,"This feature requires distributed training (so multiple GPUs)."),Tz.forEach(o),JD=d(Jt),Hf=s(Jt,"LI",{});var $z=i(Hf);XD=a($z,"It is not implemented for TPUs."),$z.forEach(o),KD=d(Jt),Ys=s(Jt,"LI",{});var Jb=i(Ys);QD=a(Jb,"It works with "),Bf=s(Jb,"CODE",{});var kz=i(Bf);e7=a(kz,"--fp16"),kz.forEach(o),t7=a(Jb," too, to make things even faster."),Jb.forEach(o),o7=d(Jt),Io=s(Jt,"LI",{});var ac=i(Io);r7=a(ac,"The "),Yf=s(ac,"CODE",{});var xz=i(Yf);a7=a(xz,"cpu_offload"),xz.forEach(o),n7=a(ac," additional option requires "),Zf=s(ac,"CODE",{});var Az=i(Zf);s7=a(Az,"--fp16"),Az.forEach(o),i7=a(ac,"."),ac.forEach(o),l7=d(Jt),Jf=s(Jt,"LI",{});var Dz=i(Jf);d7=a(Dz,`This is an area of active development, so make sure you have a source install of fairscale to use this feature as some bugs you encounter may have been fixed there already.`),Dz.forEach(o),Jt.forEach(o),_1=d(t),Ol=s(t,"P",{});var qz=i(Ol);c7=a(qz,"Known caveats:"),qz.forEach(o),v1=d(t),Wr=s(t,"UL",{});var Xb=i(Wr);Uo=s(Xb,"LI",{});var nc=i(Uo);p7=a(nc,"This feature is incompatible with "),Xf=s(nc,"CODE",{});var Pz=i(Xf);m7=a(Pz,"--predict_with_generate"),Pz.forEach(o),h7=a(nc," in the "),Kf=s(nc,"EM",{});var Sz=i(Kf);u7=a(Sz,"run_translation.py"),Sz.forEach(o),f7=a(nc," script."),nc.forEach(o),g7=d(Xb),ke=s(Xb,"LI",{});var Xt=i(ke);_7=a(Xt,"Using "),Qf=s(Xt,"CODE",{});var Oz=i(Qf);v7=a(Oz,"--sharded_ddp zero_dp_3"),Oz.forEach(o),b7=a(Xt,` requires wrapping each layer of the model in the special container `),eg=s(Xt,"CODE",{});var Cz=i(eg);y7=a(Cz,"FullyShardedDataParallelism"),Cz.forEach(o),w7=a(Xt," of fairscale. 
It should be used with the option "),tg=s(Xt,"CODE",{});var Iz=i(tg);E7=a(Iz,"auto_wrap"),Iz.forEach(o),T7=a(Xt,` if you are not doing this yourself: `),og=s(Xt,"CODE",{});var Uz=i(og);$7=a(Uz,'--sharded_ddp "zero_dp_3 auto_wrap"'),Uz.forEach(o),k7=a(Xt,"."),Xt.forEach(o),Xb.forEach(o),b1=d(t),Cl=s(t,"P",{});var Nz=i(Cl);x7=a(Nz,"Sections that were moved:"),Nz.forEach(o),y1=d(t),y=s(t,"P",{});var E=i(y);A7=a(E,"[ "),Il=s(E,"A",{href:!0});var zz=i(Il);D7=a(zz,"DeepSpeed"),zz.forEach(o),rg=s(E,"A",{id:!0}),i(rg).forEach(o),q7=a(E,` | `),Ul=s(E,"A",{href:!0});var Lz=i(Ul);P7=a(Lz,"Installation"),Lz.forEach(o),ag=s(E,"A",{id:!0}),i(ag).forEach(o),S7=a(E,` | `),Nl=s(E,"A",{href:!0});var Fz=i(Nl);O7=a(Fz,"Deployment with multiple GPUs"),Fz.forEach(o),ng=s(E,"A",{id:!0}),i(ng).forEach(o),C7=a(E,` | `),zl=s(E,"A",{href:!0});var Rz=i(zl);I7=a(Rz,"Deployment with one GPU"),Rz.forEach(o),sg=s(E,"A",{id:!0}),i(sg).forEach(o),U7=a(E,` | `),Ll=s(E,"A",{href:!0});var Wz=i(Ll);N7=a(Wz,"Deployment in Notebooks"),Wz.forEach(o),ig=s(E,"A",{id:!0}),i(ig).forEach(o),z7=a(E,` | `),Fl=s(E,"A",{href:!0});var Gz=i(Fl);L7=a(Gz,"Configuration"),Gz.forEach(o),lg=s(E,"A",{id:!0}),i(lg).forEach(o),F7=a(E,` | `),Rl=s(E,"A",{href:!0});var jz=i(Rl);R7=a(jz,"Passing Configuration"),jz.forEach(o),dg=s(E,"A",{id:!0}),i(dg).forEach(o),W7=a(E,` | `),Wl=s(E,"A",{href:!0});var Mz=i(Wl);G7=a(Mz,"Shared Configuration"),Mz.forEach(o),cg=s(E,"A",{id:!0}),i(cg).forEach(o),j7=a(E,` | `),Gl=s(E,"A",{href:!0});var Vz=i(Gl);M7=a(Vz,"ZeRO"),Vz.forEach(o),pg=s(E,"A",{id:!0}),i(pg).forEach(o),V7=a(E,` | `),jl=s(E,"A",{href:!0});var Hz=i(jl);H7=a(Hz,"ZeRO-2 Config"),Hz.forEach(o),mg=s(E,"A",{id:!0}),i(mg).forEach(o),B7=a(E,` | `),Ml=s(E,"A",{href:!0});var Bz=i(Ml);Y7=a(Bz,"ZeRO-3 Config"),Bz.forEach(o),hg=s(E,"A",{id:!0}),i(hg).forEach(o),Z7=a(E,` | `),Vl=s(E,"A",{href:!0});var Yz=i(Vl);J7=a(Yz,"NVMe Support"),Yz.forEach(o),ug=s(E,"A",{id:!0}),i(ug).forEach(o),X7=a(E,` | `),Hl=s(E,"A",{href:!0});var 
Zz=i(Hl);K7=a(Zz,"ZeRO-2 vs ZeRO-3 Performance"),Zz.forEach(o),fg=s(E,"A",{id:!0}),i(fg).forEach(o),Q7=a(E,` | `),Bl=s(E,"A",{href:!0});var Jz=i(Bl);eq=a(Jz,"ZeRO-2 Example"),Jz.forEach(o),gg=s(E,"A",{id:!0}),i(gg).forEach(o),tq=a(E,` | `),Yl=s(E,"A",{href:!0});var Xz=i(Yl);oq=a(Xz,"ZeRO-3 Example"),Xz.forEach(o),_g=s(E,"A",{id:!0}),i(_g).forEach(o),rq=a(E,` | `),Zl=s(E,"A",{href:!0});var Kz=i(Zl);aq=a(Kz,"Optimizer"),Kz.forEach(o),vg=s(E,"A",{id:!0}),i(vg).forEach(o),nq=a(E,` | `),Jl=s(E,"A",{href:!0});var Qz=i(Jl);sq=a(Qz,"Scheduler"),Qz.forEach(o),bg=s(E,"A",{id:!0}),i(bg).forEach(o),iq=a(E,` | `),Xl=s(E,"A",{href:!0});var eL=i(Xl);lq=a(eL,"fp32 Precision"),eL.forEach(o),yg=s(E,"A",{id:!0}),i(yg).forEach(o),dq=a(E,` | `),Kl=s(E,"A",{href:!0});var tL=i(Kl);cq=a(tL,"Automatic Mixed Precision"),tL.forEach(o),wg=s(E,"A",{id:!0}),i(wg).forEach(o),pq=a(E,` | `),Ql=s(E,"A",{href:!0});var oL=i(Ql);mq=a(oL,"Batch Size"),oL.forEach(o),Eg=s(E,"A",{id:!0}),i(Eg).forEach(o),hq=a(E,` | `),ed=s(E,"A",{href:!0});var rL=i(ed);uq=a(rL,"Gradient Accumulation"),rL.forEach(o),Tg=s(E,"A",{id:!0}),i(Tg).forEach(o),fq=a(E,` | `),td=s(E,"A",{href:!0});var aL=i(td);gq=a(aL,"Gradient Clipping"),aL.forEach(o),$g=s(E,"A",{id:!0}),i($g).forEach(o),_q=a(E,` | `),od=s(E,"A",{href:!0});var nL=i(od);vq=a(nL,"Getting The Model Weights Out"),nL.forEach(o),kg=s(E,"A",{id:!0}),i(kg).forEach(o),bq=a(E,` ]`),E.forEach(o),this.h()},h(){m(T,"name","hf:doc:metadata"),m(T,"content",JSON.stringify(fL)),m(S,"id","trainer"),m(S,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(S,"href","#trainer"),m(x,"class","relative 
group"),m(le,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(Y,"href","../examples"),m(oe,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(ve,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),m(be,"href","https://github.com/NVIDIA/apex"),m(be,"rel","nofollow"),m(ye,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(ui,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(fi,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(gi,"href","callback"),m(Go,"id","transformers.Trainer"),m(Go,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Go,"href","#transformers.Trainer"),m(Qt,"class","relative group"),m(_i,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),m(Mo,"class","docstring"),m(Vo,"class","docstring"),m(dt,"class","docstring"),m(ct,"class","docstring"),m(pt,"class","docstring"),m(Ho,"class","docstring"),m(Pe,"class","docstring"),m(mt,"class","docstring"),m(bi,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),m(Bo,"class","docstring"),m(ht,"class","docstring"),m(Yo,"class","docstring"),m(ut,"class","docstring"),m(Se,"class","docstring"),m(ft,"class","docstring"),m(Jo,"class","docstring"),m(Xo,"class","docstring"),m(Ko,"class","docstring"),m(gt,"class","docstring"),m(wi,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(Ha,"href","https://github.com/pytorch/pytorch/issues/16266"),m(Ha,"rel","nofollow"),m(Ei,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(Ti,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(k,"class","docstring"),m(Qo,"class","docstrin
g"),m(vt,"class","docstring"),m(bt,"class","docstring"),m(X,"class","docstring"),m(yt,"class","docstring"),m(wt,"class","docstring"),m(tr,"class","docstring"),m(or,"class","docstring"),m(Oe,"class","docstring"),m(Et,"class","docstring"),m(Tt,"class","docstring"),m(rr,"class","docstring"),m($t,"class","docstring"),m(b,"class","docstring"),m(ar,"id","transformers.Seq2SeqTrainer"),m(ar,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ar,"href","#transformers.Seq2SeqTrainer"),m(po,"class","relative group"),m(Ce,"class","docstring"),m(K,"class","docstring"),m(tt,"class","docstring"),m(sr,"id","transformers.TrainingArguments"),m(sr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(sr,"href","#transformers.TrainingArguments"),m(go,"class","relative group"),m(ki,"href","/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.HfArgumentParser"),m(xn,"href","https://docs.python.org/3/library/argparse#module-argparse"),m(xn,"rel","nofollow"),m(ce,"class","docstring"),m(ir,"class","docstring"),m(kt,"class","docstring"),m(lr,"class","docstring"),m(dr,"class","docstring"),m(cr,"class","docstring"),m(z,"class","docstring"),m(pr,"id","transformers.Seq2SeqTrainingArguments"),m(pr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(pr,"href","#transformers.Seq2SeqTrainingArguments"),m(wo,"class","relative group"),m(xi,"href","/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.HfArgumentParser"),m(Ln,"href","https://docs.python.org/3/library/argparse#module-argparse"),m(Ln,"rel","nofollow"),m(ae,"class","docstring"),m(mr,"id","checkpoints"),m(mr,"class","header-link block pr-1.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(mr,"href","#checkpoints"),m(To,"class","relative group"),m(Ai,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(Di,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),m(qi,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train"),m(Oi,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),m(_r,"id","logging"),m(_r,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(_r,"href","#logging"),m($o,"class","relative group"),m(Ii,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(Ui,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),m(Li,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),m(Fi,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(Ri,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(yr,"id","randomness"),m(yr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(yr,"href","#randomness"),m(ko,"class","relative group"),m(Hi,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(Hn,"href","https://pytorch.org/docs/stable/notes/randomness"),m(Hn,"rel","nofollow"),m(wr,"id","specific-gpus-selection"),m(wr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),m(wr,"href","#specific-gpus-selection"),m(xo,"class","relative group"),m(Yn,"href","https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html"),m(Yn,"rel","nofollow"),m(Jn,"href","https://github.com/huggingface/accelerate"),m(Jn,"rel","nofollow"),m(Xn,"href","https://github.com/microsoft/DeepSpeed"),m(Xn,"rel","nofollow"),m(Yi,"href","Deepspeed"),m(os,"href","https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html"),m(os,"rel","nofollow"),m(ls,"start","2"),m(Ar,"id","trainer-integrations"),m(Ar,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Ar,"href","#trainer-integrations"),m(Ao,"class","relative group"),m(il,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(ms,"href","https://github.com/microsoft/DeepSpeed"),m(ms,"rel","nofollow"),m(hs,"href","https://github.com/facebookresearch/fairscale/"),m(hs,"rel","nofollow"),m(us,"href","https://arxiv.org/abs/1910.02054"),m(us,"rel","nofollow"),m(dl,"id","zero-install-notes"),m(qr,"id","cuda-extension-installation-notes"),m(qr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(qr,"href","#cuda-extension-installation-notes"),m(Do,"class","relative group"),m(gs,"href","https://github.com/facebookresearch/fairscale/issues"),m(gs,"rel","nofollow"),m(_s,"href","https://github.com/microsoft/DeepSpeed/issues"),m(_s,"rel","nofollow"),m(Pr,"id","possible-problem-1"),m(Pr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Pr,"href","#possible-problem-1"),m(qo,"class","relative 
group"),m(ws,"href","https://www.google.com/search?q=ubuntu+cuda+10.2+install"),m(ws,"rel","nofollow"),m(Or,"id","possible-problem-2"),m(Or,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Or,"href","#possible-problem-2"),m(Po,"class","relative group"),m(Ir,"id","possible-problem-3"),m(Ir,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Ir,"href","#possible-problem-3"),m(So,"class","relative group"),m(Nr,"id","fairscale"),m(Nr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Nr,"href","#fairscale"),m(Oo,"class","relative group"),m(qs,"href","https://github.com/facebookresearch/fairscale/"),m(qs,"rel","nofollow"),m(El,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(Ps,"href","https://arxiv.org/abs/1910.02054"),m(Ps,"rel","nofollow"),m(Is,"href","https://github.com/facebookresearch/fairscale/#installation"),m(Is,"rel","nofollow"),m(kl,"href","#zero-install-notes"),m(Ws,"href","https://github.com/facebookresearch/fairscale/issues"),m(Ws,"rel","nofollow"),m(Hs,"start","3"),m(Il,"href","./deepspeed#deepspeed-trainer-integration"),m(rg,"id","deepspeed"),m(Ul,"href","./deepspeed#deepspeed-installation"),m(ag,"id","installation"),m(Nl,"href","./deepspeed#deepspeed-multi-gpu"),m(ng,"id","deployment-with-multiple-gpus"),m(zl,"href","./deepspeed#deepspeed-one-gpu"),m(sg,"id","deployment-with-one-gpu"),m(Ll,"href","./deepspeed#deepspeed-notebook"),m(ig,"id","deployment-in-notebooks"),m(Fl,"href","./deepspeed#deepspeed-config"),m(lg,"id","configuration"),m(Rl,"href","./deepspeed#deepspeed-config-passing"),m(dg,"id","passing-configuration"),m(Wl,"href","./deepspeed#d
eepspeed-config-shared"),m(cg,"id","shared-configuration"),m(Gl,"href","./deepspeed#deepspeed-zero"),m(pg,"id","zero"),m(jl,"href","./deepspeed#deepspeed-zero2-config"),m(mg,"id","zero-2-config"),m(Ml,"href","./deepspeed#deepspeed-zero3-config"),m(hg,"id","zero-3-config"),m(Vl,"href","./deepspeed#deepspeed-nvme"),m(ug,"id","nvme-support"),m(Hl,"href","./deepspeed#deepspeed-zero2-zero3-performance"),m(fg,"id","zero-2-vs-zero-3-performance"),m(Bl,"href","./deepspeed#deepspeed-zero2-example"),m(gg,"id","zero-2-example"),m(Yl,"href","./deepspeed#deepspeed-zero3-example"),m(_g,"id","zero-3-example"),m(Zl,"href","./deepspeed#deepspeed-optimizer"),m(vg,"id","optimizer"),m(Jl,"href","./deepspeed#deepspeed-scheduler"),m(bg,"id","scheduler"),m(Xl,"href","./deepspeed#deepspeed-fp32"),m(yg,"id","fp32-precision"),m(Kl,"href","./deepspeed#deepspeed-amp"),m(wg,"id","automatic-mixed-precision"),m(Ql,"href","./deepspeed#deepspeed-bs"),m(Eg,"id","batch-size"),m(ed,"href","./deepspeed#deepspeed-grad-acc"),m(Tg,"id","gradient-accumulation"),m(td,"href","./deepspeed#deepspeed-grad-clip"),m($g,"id","gradient-clipping"),m(od,"href","./deepspeed#deepspeed-weight-extraction"),m(kg,"id","getting-the-model-weights-out")},m(t,c){e(document.head,T),p(t,L,c),p(t,x,c),e(x,S),e(S,fe),f(R,fe,null),e(x,W),e(x,V),e(V,ge),p(t,ee,c),p(t,G,c),e(G,ie),e(G,le),e(le,te),e(G,de),e(G,Y),e(Y,Ze),e(G,_e),p(t,N,c),p(t,C,c),e(C,at),e(C,oe),e(oe,nt),e(C,st),e(C,ve),e(ve,sa),e(C,ia),p(t,Je,c),p(t,Ae,c),e(Ae,la),e(Ae,be),e(be,da),e(Ae,ca),p(t,Z,c),p(t,H,c),e(H,Qs),e(H,ye),e(ye,Lo),e(H,ei),p(t,Kt,c),p(t,D,c),e(D,B),e(B,Fo),e(Fo,ti),e(B,oi),e(D,ri),e(D,ai),e(ai,sc),e(sc,Qb),e(ai,ey),e(D,ty),e(D,ni),e(ni,ic),e(ic,oy),e(ni,ry),e(D,ay),e(D,si),e(si,lc),e(lc,ny),e(si,sy),e(D,iy),e(D,it),e(it,dc),e(dc,ly),e(it,dy),e(it,cc),e(cc,cy),e(it,py),e(it,pc),e(pc,my),e(it,hy),e(D,uy),e(D,ii),e(ii,mc),e(mc,fy),e(ii,gy),e(D,_y),e(D,li),e(li,hc),e(hc,vy),e(li,by),e(D,yy),e(D,di),e(di,uc),e(uc,wy),e(di,Ey),e(D,Ty),e(D,ci),e(ci,fc),e(f
c,$y),e(ci,ky),e(D,xy),e(D,pi),e(pi,gc),e(gc,Ay),e(pi,Dy),e(D,qy),e(D,mi),e(mi,_c),e(_c,Py),e(mi,Sy),e(D,Oy),e(D,hi),e(hi,vc),e(vc,Cy),e(hi,Iy),p(t,Og,c),f(Ro,t,c),p(t,Cg,c),p(t,Wo,c),e(Wo,Uy),e(Wo,ui),e(ui,Ny),e(Wo,zy),p(t,Ig,c),f(pa,t,c),p(t,Ug,c),p(t,lt,c),e(lt,Ly),e(lt,fi),e(fi,Fy),e(lt,Ry),e(lt,gi),e(gi,Wy),e(lt,Gy),p(t,Ng,c),p(t,Qt,c),e(Qt,Go),e(Go,bc),f(ma,bc,null),e(Qt,jy),e(Qt,yc),e(yc,My),p(t,zg,c),p(t,b,c),f(ha,b,null),e(b,Vy),e(b,wc),e(wc,Hy),e(b,By),e(b,Ec),e(Ec,Yy),e(b,Zy),e(b,we),e(we,jo),e(jo,Tc),e(Tc,Jy),e(jo,Xy),e(jo,_i),e(_i,Ky),e(jo,Qy),e(we,e2),e(we,J),e(J,$c),e($c,t2),e(J,o2),e(J,kc),e(kc,r2),e(J,a2),e(J,xc),e(xc,n2),e(J,s2),e(J,Ac),e(Ac,i2),e(J,l2),e(J,Dc),e(Dc,d2),e(J,c2),e(J,qc),e(qc,p2),e(J,m2),e(we,h2),e(we,vi),e(vi,Pc),e(Pc,u2),e(vi,f2),e(we,g2),e(we,De),e(De,Sc),e(Sc,_2),e(De,v2),e(De,Oc),e(Oc,b2),e(De,y2),e(De,Cc),e(Cc,w2),e(De,E2),e(De,Ic),e(Ic,T2),e(De,$2),e(we,k2),e(we,qe),e(qe,Uc),e(Uc,x2),e(qe,A2),e(qe,Nc),e(Nc,D2),e(qe,q2),e(qe,zc),e(zc,P2),e(qe,S2),e(qe,Lc),e(Lc,O2),e(qe,C2),e(b,I2),e(b,Mo),f(ua,Mo,null),e(Mo,U2),e(Mo,fa),e(fa,N2),e(fa,Fc),e(Fc,z2),e(fa,L2),e(b,F2),e(b,Vo),f(ga,Vo,null),e(Vo,R2),e(Vo,_a),e(_a,W2),e(_a,Rc),e(Rc,G2),e(_a,j2),e(b,M2),e(b,dt),f(va,dt,null),e(dt,V2),e(dt,Wc),e(Wc,H2),e(dt,B2),e(dt,Gc),e(Gc,Y2),e(b,Z2),e(b,ct),f(ba,ct,null),e(ct,J2),e(ct,jc),e(jc,X2),e(ct,K2),e(ct,ya),e(ya,Q2),e(ya,Mc),e(Mc,ew),e(ya,tw),e(b,ow),e(b,pt),f(wa,pt,null),e(pt,rw),e(pt,Vc),e(Vc,aw),e(pt,nw),e(pt,Xe),e(Xe,sw),e(Xe,Hc),e(Hc,iw),e(Xe,lw),e(Xe,Bc),e(Bc,dw),e(Xe,cw),e(Xe,Yc),e(Yc,pw),e(Xe,mw),e(b,hw),e(b,Ho),f(Ea,Ho,null),e(Ho,uw),e(Ho,Zc),e(Zc,fw),e(b,gw),e(b,Pe),f(Ta,Pe,null),e(Pe,_w),e(Pe,Jc),e(Jc,vw),e(Pe,bw),e(Pe,$a),e($a,yw),e($a,Xc),e(Xc,ww),e($a,Ew),e(Pe,Tw),e(Pe,Kc),e(Kc,$w),e(b,kw),e(b,mt),f(ka,mt,null),e(mt,xw),e(mt,eo),e(eo,Aw),e(eo,Qc),e(Qc,Dw),e(eo,qw),e(eo,ep),e(ep,Pw),e(eo,Sw),e(mt,Ow),e(mt,tp),e(tp,Cw),e(b,Iw),e(b,Bo),f(xa,Bo,null),e(Bo,Uw),e(Bo,Aa),e(Aa,Nw),e(Aa,bi),e(bi,zw),e(Aa,Lw),e(b,Fw),e(b,ht),f(Da,ht,null
),e(ht,Rw),e(ht,qa),e(qa,Ww),e(qa,op),e(op,Gw),e(qa,jw),e(ht,Mw),e(ht,rp),e(rp,Vw),e(b,Hw),e(b,Yo),f(Pa,Yo,null),e(Yo,Bw),e(Yo,ap),e(ap,Yw),e(b,Zw),e(b,ut),f(Sa,ut,null),e(ut,Jw),e(ut,Oa),e(Oa,Xw),e(Oa,np),e(np,Kw),e(Oa,Qw),e(ut,e0),e(ut,sp),e(sp,t0),e(b,o0),e(b,Se),f(Ca,Se,null),e(Se,r0),e(Se,Ia),e(Ia,a0),e(Ia,ip),e(ip,n0),e(Ia,s0),e(Se,i0),e(Se,to),e(to,l0),e(to,lp),e(lp,d0),e(to,c0),e(to,dp),e(dp,p0),e(to,m0),e(Se,h0),e(Se,cp),e(cp,u0),e(b,f0),e(b,ft),f(Ua,ft,null),e(ft,g0),e(ft,Ee),e(Ee,_0),e(Ee,pp),e(pp,v0),e(Ee,b0),e(Ee,mp),e(mp,y0),e(Ee,w0),e(Ee,hp),e(hp,E0),e(Ee,T0),e(Ee,up),e(up,$0),e(Ee,k0),e(ft,x0),f(Zo,ft,null),e(b,A0),e(b,Jo),f(Na,Jo,null),e(Jo,D0),e(Jo,za),e(za,q0),e(za,fp),e(fp,P0),e(za,S0),e(b,O0),e(b,Xo),f(La,Xo,null),e(Xo,C0),e(Xo,gp),e(gp,I0),e(b,U0),e(b,Ko),f(Fa,Ko,null),e(Ko,N0),e(Ko,Ra),e(Ra,z0),e(Ra,_p),e(_p,L0),e(Ra,F0),e(b,R0),e(b,gt),f(Wa,gt,null),e(gt,W0),e(gt,Ga),e(Ga,G0),e(Ga,vp),e(vp,j0),e(Ga,M0),e(gt,V0),e(gt,bp),e(bp,H0),e(b,B0),e(b,k),f(ja,k,null),e(k,Y0),e(k,yp),e(yp,Z0),e(k,J0),e(k,wp),e(wp,X0),e(k,K0),e(k,Ep),e(Ep,Q0),e(k,eE),e(k,oo),e(oo,tE),e(oo,Tp),e(Tp,oE),e(oo,rE),e(oo,$p),e($p,aE),e(oo,nE),e(k,sE),e(k,kp),e(kp,iE),e(k,lE),f(Ma,k,null),e(k,dE),e(k,xp),e(xp,Ap),e(Ap,cE),e(k,pE),e(k,Ke),e(Ke,Te),e(Te,mE),e(Te,Dp),e(Dp,hE),e(Te,uE),e(Te,qp),e(qp,fE),e(Te,gE),e(Te,Pp),e(Pp,_E),e(Te,vE),e(Te,Sp),e(Sp,bE),e(Te,yE),e(Ke,wE),e(Ke,ro),e(ro,EE),e(ro,Op),e(Op,TE),e(ro,$E),e(ro,Cp),e(Cp,kE),e(ro,xE),e(Ke,AE),e(Ke,yi),e(yi,Ip),e(Ip,DE),e(yi,qE),e(Ke,PE),e(Ke,_t),e(_t,Up),e(Up,SE),e(_t,OE),e(_t,Np),e(Np,CE),e(_t,IE),e(_t,zp),e(zp,UE),e(_t,NE),e(k,zE),e(k,Lp),e(Lp,LE),e(k,FE),e(k,Fp),e(Fp,RE),e(k,WE),e(k,Va),e(Va,GE),e(Va,Rp),e(Rp,jE),e(Va,ME),e(k,VE),e(k,Qe),e(Qe,HE),e(Qe,Wp),e(Wp,BE),e(Qe,YE),e(Qe,Gp),e(Gp,ZE),e(Qe,JE),e(Qe,jp),e(jp,XE),e(Qe,KE),e(k,QE),e(k,re),e(re,e4),e(re,wi),e(wi,t4),e(re,o4),e(re,Mp),e(Mp,r4),e(re,a4),e(re,Vp),e(Vp,n4),e(re,s4),e(re,Hp),e(Hp,i4),e(re,l4),e(re,Bp),e(Bp,d4),e(re,c4),e(k,p4),e(k,I),e(I,m4),e(I,Yp),e(Yp,
h4),e(I,u4),e(I,Zp),e(Zp,f4),e(I,g4),e(I,Jp),e(Jp,_4),e(I,v4),e(I,Xp),e(Xp,b4),e(I,y4),e(I,Ha),e(Ha,w4),e(I,E4),e(I,Kp),e(Kp,T4),e(I,$4),e(I,Qp),e(Qp,k4),e(I,x4),e(I,em),e(em,A4),e(I,D4),e(I,tm),e(tm,q4),e(I,P4),e(I,om),e(om,S4),e(I,O4),e(k,C4),e(k,$e),e($e,I4),e($e,Ei),e(Ei,U4),e($e,N4),e($e,rm),e(rm,z4),e($e,L4),e($e,Ti),e(Ti,F4),e($e,R4),e($e,am),e(am,W4),e($e,G4),e(k,j4),e(k,nm),e(nm,M4),e(b,V4),e(b,Qo),f(Ba,Qo,null),e(Qo,H4),e(Qo,sm),e(sm,B4),e(b,Y4),e(b,vt),f(Ya,vt,null),e(vt,Z4),e(vt,Za),e(Za,J4),e(Za,im),e(im,X4),e(Za,K4),e(vt,Q4),e(vt,$i),e($i,eT),e($i,lm),e(lm,tT),e(b,oT),e(b,bt),f(Ja,bt,null),e(bt,rT),e(bt,Xa),e(Xa,aT),e(Xa,dm),e(dm,nT),e(Xa,sT),e(bt,iT),e(bt,Ka),e(Ka,lT),e(Ka,cm),e(cm,dT),e(Ka,cT),e(b,pT),e(b,X),f(Qa,X,null),e(X,mT),e(X,pm),e(pm,hT),e(X,uT),e(X,en),e(en,fT),e(en,mm),e(mm,gT),e(en,_T),e(X,vT),f(er,X,null),e(X,bT),e(X,tn),e(tn,yT),e(tn,hm),e(hm,wT),e(tn,ET),e(X,TT),e(X,ao),e(ao,no),e(no,$T),e(no,um),e(um,kT),e(no,xT),e(no,fm),e(fm,AT),e(no,DT),e(ao,qT),e(ao,so),e(so,PT),e(so,gm),e(gm,ST),e(so,OT),e(so,_m),e(_m,CT),e(so,IT),e(ao,UT),e(ao,io),e(io,NT),e(io,vm),e(vm,zT),e(io,LT),e(io,bm),e(bm,FT),e(io,RT),e(b,WT),e(b,yt),f(on,yt,null),e(yt,GT),e(yt,lo),e(lo,jT),e(lo,ym),e(ym,MT),e(lo,VT),e(lo,wm),e(wm,HT),e(lo,BT),e(yt,YT),e(yt,Em),e(Em,ZT),e(b,JT),e(b,wt),f(rn,wt,null),e(wt,XT),e(wt,co),e(co,KT),e(co,Tm),e(Tm,QT),e(co,e3),e(co,$m),e($m,t3),e(co,o3),e(wt,r3),e(wt,km),e(km,a3),e(b,n3),e(b,tr),f(an,tr,null),e(tr,s3),e(tr,et),e(et,i3),e(et,xm),e(xm,l3),e(et,d3),e(et,Am),e(Am,c3),e(et,p3),e(et,Dm),e(Dm,m3),e(et,h3),e(b,u3),e(b,or),f(nn,or,null),e(or,f3),e(or,sn),e(sn,g3),e(sn,qm),e(qm,_3),e(sn,v3),e(b,b3),e(b,Oe),f(ln,Oe,null),e(Oe,y3),e(Oe,dn),e(dn,w3),e(dn,Pm),e(Pm,E3),e(dn,T3),e(Oe,$3),e(Oe,Sm),e(Sm,k3),e(Oe,x3),e(Oe,cn),e(cn,A3),e(cn,Om),e(Om,D3),e(cn,q3),e(b,P3),e(b,Et),f(pn,Et,null),e(Et,S3),e(Et,mn),e(mn,O3),e(mn,Cm),e(Cm,C3),e(mn,I3),e(Et,U3),e(Et,Im),e(Im,N3),e(b,z3),e(b,Tt),f(hn,Tt,null),e(Tt,L3),e(Tt,Um),e(Um,F3),e(Tt,R3),e(Tt,Nm),e(Nm
,W3),e(b,G3),e(b,rr),f(un,rr,null),e(rr,j3),e(rr,zm),e(zm,M3),e(b,V3),e(b,$t),f(fn,$t,null),e($t,H3),e($t,Lm),e(Lm,B3),e($t,Y3),e($t,Fm),e(Fm,Z3),p(t,Lg,c),p(t,po,c),e(po,ar),e(ar,Rm),f(gn,Rm,null),e(po,J3),e(po,Wm),e(Wm,X3),p(t,Fg,c),p(t,tt,c),f(_n,tt,null),e(tt,K3),e(tt,Ce),f(vn,Ce,null),e(Ce,Q3),e(Ce,Gm),e(Gm,e6),e(Ce,t6),e(Ce,bn),e(bn,o6),e(bn,jm),e(jm,r6),e(bn,a6),e(Ce,n6),e(Ce,Mm),e(Mm,s6),e(tt,i6),e(tt,K),f(yn,K,null),e(K,l6),e(K,Vm),e(Vm,d6),e(K,c6),e(K,wn),e(wn,p6),e(wn,Hm),e(Hm,m6),e(wn,h6),e(K,u6),f(nr,K,null),e(K,f6),e(K,En),e(En,g6),e(En,Bm),e(Bm,_6),e(En,v6),e(K,b6),e(K,mo),e(mo,ho),e(ho,y6),e(ho,Ym),e(Ym,w6),e(ho,E6),e(ho,Zm),e(Zm,T6),e(ho,$6),e(mo,k6),e(mo,uo),e(uo,x6),e(uo,Jm),e(Jm,A6),e(uo,D6),e(uo,Xm),e(Xm,q6),e(uo,P6),e(mo,S6),e(mo,fo),e(fo,O6),e(fo,Km),e(Km,C6),e(fo,I6),e(fo,Qm),e(Qm,U6),e(fo,N6),p(t,Rg,c),p(t,go,c),e(go,sr),e(sr,eh),f(Tn,eh,null),e(go,z6),e(go,th),e(th,L6),p(t,Wg,c),p(t,z,c),f($n,z,null),e(z,F6),e(z,kn),e(kn,R6),e(kn,oh),e(oh,W6),e(kn,G6),e(z,j6),e(z,_o),e(_o,M6),e(_o,ki),e(ki,V6),e(_o,H6),e(_o,xn),e(xn,B6),e(_o,Y6),e(z,Z6),e(z,ce),f(An,ce,null),e(ce,J6),e(ce,rh),e(rh,X6),e(ce,K6),e(ce,vo),e(vo,Q6),e(vo,ah),e(ah,e$),e(vo,t$),e(vo,nh),e(nh,o$),e(vo,r$),e(ce,a$),e(ce,bo),e(bo,n$),e(bo,sh),e(sh,s$),e(bo,i$),e(bo,ih),e(ih,l$),e(bo,d$),e(ce,c$),e(ce,Dn),e(Dn,p$),e(Dn,lh),e(lh,m$),e(Dn,h$),e(z,u$),e(z,ir),f(qn,ir,null),e(ir,f$),e(ir,dh),e(dh,g$),e(z,_$),e(z,kt),f(Pn,kt,null),e(kt,v$),e(kt,ch),e(ch,b$),e(kt,y$),e(kt,yo),e(yo,w$),e(yo,ph),e(ph,E$),e(yo,T$),e(yo,mh),e(mh,$$),e(yo,k$),e(z,x$),e(z,lr),f(Sn,lr,null),e(lr,A$),e(lr,On),e(On,D$),e(On,hh),e(hh,q$),e(On,P$),e(z,S$),e(z,dr),f(Cn,dr,null),e(dr,O$),e(dr,uh),e(uh,C$),e(z,I$),e(z,cr),f(In,cr,null),e(cr,U$),e(cr,fh),e(fh,N$),p(t,Gg,c),p(t,wo,c),e(wo,pr),e(pr,gh),f(Un,gh,null),e(wo,z$),e(wo,_h),e(_h,L$),p(t,jg,c),p(t,ae,c),f(Nn,ae,null),e(ae,F$),e(ae,zn),e(zn,R$),e(zn,vh),e(vh,W$),e(zn,G$),e(ae,j$),e(ae,Eo),e(Eo,M$),e(Eo,xi),e(xi,V$),e(Eo,H$),e(Eo,Ln),e(Ln,B$),e(Eo,Y$),e(ae,Z$),e(ae,n
e),e(ne,J$),e(ne,bh),e(bh,X$),e(ne,K$),e(ne,yh),e(yh,Q$),e(ne,ek),e(ne,wh),e(wh,tk),e(ne,ok),e(ne,Eh),e(Eh,rk),e(ne,ak),e(ne,Th),e(Th,nk),e(ne,sk),e(ae,ik),e(ae,q),e(q,lk),e(q,$h),e($h,dk),e(q,ck),e(q,kh),e(kh,pk),e(q,mk),e(q,xh),e(xh,hk),e(q,uk),e(q,Ah),e(Ah,fk),e(q,gk),e(q,Dh),e(Dh,_k),e(q,vk),e(q,qh),e(qh,bk),e(q,yk),e(q,Ph),e(Ph,wk),e(q,Ek),e(q,Sh),e(Sh,Tk),e(q,$k),e(q,Oh),e(Oh,kk),e(q,xk),e(q,Ch),e(Ch,Ak),e(q,Dk),e(q,Ih),e(Ih,qk),e(q,Pk),e(q,Uh),e(Uh,Sk),e(q,Ok),e(q,Nh),e(Nh,Ck),e(q,Ik),p(t,Mg,c),p(t,To,c),e(To,mr),e(mr,zh),f(Fn,zh,null),e(To,Uk),e(To,Lh),e(Lh,Nk),p(t,Vg,c),p(t,pe,c),e(pe,zk),e(pe,Ai),e(Ai,Lk),e(pe,Fk),e(pe,Fh),e(Fh,Rk),e(pe,Wk),e(pe,Di),e(Di,Gk),e(pe,jk),e(pe,Rh),e(Rh,Mk),e(pe,Vk),p(t,Hg,c),p(t,hr,c),e(hr,Hk),e(hr,qi),e(qi,Bk),e(hr,Yk),p(t,Bg,c),p(t,ur,c),e(ur,Pi),e(Pi,Wh),e(Wh,Zk),e(Pi,Jk),e(ur,Xk),e(ur,Si),e(Si,Gh),e(Gh,Kk),e(Si,Qk),p(t,Yg,c),p(t,Ie,c),e(Ie,e5),e(Ie,jh),e(jh,t5),e(Ie,o5),e(Ie,Mh),e(Mh,r5),e(Ie,a5),e(Ie,Oi),e(Oi,n5),e(Ie,s5),p(t,Zg,c),p(t,fr,c),e(fr,gr),e(gr,Vh),e(Vh,i5),e(gr,l5),e(gr,Hh),e(Hh,d5),e(gr,c5),e(fr,p5),e(fr,Ci),e(Ci,Bh),e(Bh,m5),e(Ci,h5),p(t,Jg,c),p(t,$o,c),e($o,_r),e(_r,Yh),f(Rn,Yh,null),e($o,u5),e($o,Zh),e(Zh,f5),p(t,Xg,c),p(t,Ue,c),e(Ue,g5),e(Ue,Ii),e(Ii,_5),e(Ue,v5),e(Ue,Jh),e(Jh,b5),e(Ue,y5),e(Ue,Xh),e(Xh,w5),e(Ue,E5),p(t,Kg,c),p(t,xt,c),e(xt,T5),e(xt,Kh),e(Kh,$5),e(xt,k5),e(xt,Ui),e(Ui,x5),e(xt,A5),p(t,Qg,c),p(t,vr,c),e(vr,Ni),e(Ni,Qh),e(Qh,D5),e(Ni,q5),e(vr,P5),e(vr,zi),e(zi,eu),e(eu,S5),e(zi,O5),p(t,e_,c),p(t,Ne,c),e(Ne,C5),e(Ne,Li),e(Li,I5),e(Ne,U5),e(Ne,tu),e(tu,N5),e(Ne,z5),e(Ne,ou),e(ou,L5),e(Ne,F5),p(t,t_,c),p(t,Q,c),e(Q,R5),e(Q,Fi),e(Fi,W5),e(Q,G5),e(Q,ru),e(ru,j5),e(Q,M5),e(Q,au),e(au,V5),e(Q,H5),e(Q,nu),e(nu,B5),e(Q,Y5),e(Q,Ri),e(Ri,Z5),e(Q,J5),p(t,o_,c),p(t,Wi,c),e(Wi,X5),p(t,r_,c),f(Wn,t,c),p(t,a_,c),p(t,Gi,c),e(Gi,K5),p(t,n_,c),f(Gn,t,c),p(t,s_,c),p(t,ji,c),e(ji,Q5),p(t,i_,c),f(jn,t,c),p(t,l_,c),p(t,Mi,c),e(Mi,ex),p(t,d_,c),p(t,Vi,c),e(Vi,tx),p(t,c_,c),f(Mn,t,c),p(t,p_,c),p(t,br,c),e(br,ox),e(b
r,su),e(su,rx),e(br,ax),p(t,m_,c),p(t,ko,c),e(ko,yr),e(yr,iu),f(Vn,iu,null),e(ko,nx),e(ko,lu),e(lu,sx),p(t,h_,c),p(t,me,c),e(me,ix),e(me,Hi),e(Hi,lx),e(me,dx),e(me,du),e(du,cx),e(me,px),e(me,cu),e(cu,mx),e(me,hx),e(me,pu),e(pu,ux),e(me,fx),p(t,u_,c),p(t,At,c),e(At,gx),e(At,Hn),e(Hn,_x),e(At,vx),e(At,mu),e(mu,bx),e(At,yx),p(t,f_,c),p(t,xo,c),e(xo,wr),e(wr,hu),f(Bn,hu,null),e(xo,wx),e(xo,uu),e(uu,Ex),p(t,g_,c),p(t,Bi,c),e(Bi,Tx),p(t,__,c),p(t,Er,c),e(Er,$x),e(Er,Yn),e(Yn,fu),e(fu,kx),e(Er,xx),p(t,v_,c),f(Zn,t,c),p(t,b_,c),p(t,Dt,c),e(Dt,Ax),e(Dt,Jn),e(Jn,gu),e(gu,Dx),e(Dt,qx),e(Dt,Xn),e(Xn,_u),e(_u,Px),e(Dt,Sx),p(t,y_,c),f(Kn,t,c),p(t,w_,c),f(Qn,t,c),p(t,E_,c),p(t,Tr,c),e(Tr,Ox),e(Tr,Yi),e(Yi,Cx),e(Tr,Ix),p(t,T_,c),p(t,Zi,c),e(Zi,Ux),p(t,$_,c),p(t,Ji,c),e(Ji,Nx),p(t,k_,c),p(t,Xi,c),e(Xi,vu),e(vu,bu),e(bu,zx),p(t,x_,c),p(t,$r,c),e($r,Lx),e($r,yu),e(yu,Fx),e($r,Rx),p(t,A_,c),p(t,Ki,c),e(Ki,Wx),p(t,D_,c),f(es,t,c),p(t,q_,c),p(t,qt,c),e(qt,Gx),e(qt,wu),e(wu,jx),e(qt,Mx),e(qt,Eu),e(Eu,Vx),e(qt,Hx),p(t,P_,c),p(t,Qi,c),e(Qi,Bx),p(t,S_,c),f(ts,t,c),p(t,O_,c),p(t,Pt,c),e(Pt,Yx),e(Pt,Tu),e(Tu,Zx),e(Pt,Jx),e(Pt,$u),e($u,Xx),e(Pt,Kx),p(t,C_,c),p(t,St,c),e(St,Qx),e(St,ku),e(ku,eA),e(St,tA),e(St,os),e(os,xu),e(xu,oA),e(St,rA),p(t,I_,c),f(rs,t,c),p(t,U_,c),p(t,el,c),e(el,aA),p(t,N_,c),f(as,t,c),p(t,z_,c),p(t,tl,c),e(tl,nA),p(t,L_,c),f(ns,t,c),p(t,F_,c),p(t,ol,c),e(ol,sA),p(t,R_,c),p(t,rl,c),e(rl,Au),e(Au,Du),e(Du,iA),p(t,W_,c),p(t,kr,c),e(kr,lA),e(kr,qu),e(qu,dA),e(kr,cA),p(t,G_,c),p(t,al,c),e(al,ss),e(ss,pA),e(ss,Pu),e(Pu,mA),e(ss,hA),p(t,j_,c),f(is,t,c),p(t,M_,c),p(t,ls,c),e(ls,Su),e(Su,uA),p(t,V_,c),f(ds,t,c),p(t,H_,c),p(t,Ot,c),e(Ot,fA),e(Ot,Ou),e(Ou,gA),e(Ot,_A),e(Ot,Cu),e(Cu,vA),e(Ot,bA),p(t,B_,c),p(t,nl,c),e(nl,yA),p(t,Y_,c),f(cs,t,c),p(t,Z_,c),p(t,sl,c),e(sl,wA),p(t,J_,c),p(t,xr,c),e(xr,EA),e(xr,Iu),e(Iu,TA),e(xr,$A),p(t,X_,c),p(t,Ao,c),e(Ao,Ar),e(Ar,Uu),f(ps,Uu,null),e(Ao,kA),e(Ao,Nu),e(Nu,xA),p(t,K_,c),p(t,Dr,c),e(Dr,AA),e(Dr,il),e(il,DA),e(Dr,qA),p(t,Q_,c),p(t,ze,c),e(ze,
PA),e(ze,ms),e(ms,SA),e(ze,OA),e(ze,hs),e(hs,CA),e(ze,IA),e(ze,us),e(us,UA),e(ze,NA),p(t,ev,c),p(t,ll,c),e(ll,zA),p(t,tv,c),p(t,dl,c),p(t,ov,c),p(t,Do,c),e(Do,qr),e(qr,zu),f(fs,zu,null),e(Do,LA),e(Do,Lu),e(Lu,FA),p(t,rv,c),p(t,cl,c),e(cl,RA),p(t,av,c),p(t,Ct,c),e(Ct,WA),e(Ct,gs),e(gs,GA),e(Ct,jA),e(Ct,_s),e(_s,MA),e(Ct,VA),p(t,nv,c),p(t,pl,c),e(pl,HA),p(t,sv,c),f(vs,t,c),p(t,iv,c),p(t,ml,c),e(ml,BA),p(t,lv,c),p(t,It,c),e(It,YA),e(It,Fu),e(Fu,ZA),e(It,JA),e(It,Ru),e(Ru,XA),e(It,KA),p(t,dv,c),p(t,qo,c),e(qo,Pr),e(Pr,Wu),f(bs,Wu,null),e(qo,QA),e(qo,Gu),e(Gu,e9),p(t,cv,c),p(t,hl,c),e(hl,t9),p(t,pv,c),p(t,Le,c),e(Le,o9),e(Le,ju),e(ju,r9),e(Le,a9),e(Le,Mu),e(Mu,n9),e(Le,s9),e(Le,Vu),e(Vu,i9),e(Le,l9),p(t,mv,c),p(t,Ut,c),e(Ut,d9),e(Ut,Hu),e(Hu,c9),e(Ut,p9),e(Ut,Bu),e(Bu,m9),e(Ut,h9),p(t,hv,c),f(ys,t,c),p(t,uv,c),p(t,Sr,c),e(Sr,u9),e(Sr,ws),e(ws,f9),e(Sr,g9),p(t,fv,c),p(t,Po,c),e(Po,Or),e(Or,Yu),f(Es,Yu,null),e(Po,_9),e(Po,Zu),e(Zu,v9),p(t,gv,c),p(t,ul,c),e(ul,b9),p(t,_v,c),f(Ts,t,c),p(t,vv,c),p(t,Nt,c),e(Nt,y9),e(Nt,Ju),e(Ju,w9),e(Nt,E9),e(Nt,Xu),e(Xu,T9),e(Nt,$9),p(t,bv,c),p(t,fl,c),e(fl,k9),p(t,yv,c),f($s,t,c),p(t,wv,c),p(t,gl,c),e(gl,x9),p(t,Ev,c),p(t,Cr,c),e(Cr,A9),e(Cr,Ku),e(Ku,D9),e(Cr,q9),p(t,Tv,c),p(t,ot,c),e(ot,Qu),e(Qu,P9),e(ot,S9),e(ot,ef),e(ef,O9),e(ot,C9),e(ot,tf),e(tf,I9),e(ot,U9),p(t,$v,c),p(t,_l,c),e(_l,N9),p(t,kv,c),f(ks,t,c),p(t,xv,c),p(t,vl,c),e(vl,z9),p(t,Av,c),p(t,Fe,c),e(Fe,L9),e(Fe,of),e(of,F9),e(Fe,R9),e(Fe,rf),e(rf,W9),e(Fe,G9),e(Fe,af),e(af,j9),e(Fe,M9),p(t,Dv,c),p(t,So,c),e(So,Ir),e(Ir,nf),f(xs,nf,null),e(So,V9),e(So,sf),e(sf,H9),p(t,qv,c),p(t,zt,c),e(zt,B9),e(zt,lf),e(lf,Y9),e(zt,Z9),e(zt,df),e(df,J9),e(zt,X9),p(t,Pv,c),p(t,bl,c),e(bl,K9),p(t,Sv,c),p(t,yl,c),e(yl,Q9),p(t,Ov,c),p(t,Ur,c),e(Ur,e8),e(Ur,cf),e(cf,t8),e(Ur,o8),p(t,Cv,c),f(As,t,c),p(t,Iv,c),p(t,j,c),e(j,r8),e(j,pf),e(pf,a8),e(j,n8),e(j,mf),e(mf,s8),e(j,i8),e(j,hf),e(hf,l8),e(j,d8),e(j,uf),e(uf,c8),e(j,p8),e(j,ff),e(ff,m8),e(j,h8),e(j,gf),e(gf,u8),e(j,f8),p(t,Uv,c),p(t,wl,c),e(wl,g8),p
(t,Nv,c),p(t,Oo,c),e(Oo,Nr),e(Nr,_f),f(Ds,_f,null),e(Oo,_8),e(Oo,vf),e(vf,v8),p(t,zv,c),p(t,Re,c),e(Re,b8),e(Re,qs),e(qs,y8),e(Re,w8),e(Re,El),e(El,E8),e(Re,T8),e(Re,Ps),e(Ps,$8),e(Re,k8),p(t,Lv,c),p(t,We,c),e(We,bf),e(bf,x8),e(We,A8),e(We,yf),e(yf,D8),e(We,q8),e(We,wf),e(wf,P8),e(We,S8),e(We,Ef),e(Ef,O8),p(t,Fv,c),p(t,Tl,c),e(Tl,C8),p(t,Rv,c),p(t,Ss,c),e(Ss,Tf),e(Tf,I8),e(Ss,U8),p(t,Wv,c),p(t,$l,c),e($l,N8),p(t,Gv,c),f(Os,t,c),p(t,jv,c),p(t,Lt,c),e(Lt,z8),e(Lt,$f),e($f,L8),e(Lt,F8),e(Lt,kf),e(kf,R8),e(Lt,W8),p(t,Mv,c),f(Cs,t,c),p(t,Vv,c),p(t,Ft,c),e(Ft,G8),e(Ft,xf),e(xf,j8),e(Ft,M8),e(Ft,Is),e(Is,V8),e(Ft,H8),p(t,Hv,c),p(t,zr,c),e(zr,B8),e(zr,kl),e(kl,Y8),e(zr,Z8),p(t,Bv,c),p(t,xl,c),e(xl,J8),p(t,Yv,c),p(t,Us,c),e(Us,Af),e(Af,X8),e(Us,K8),p(t,Zv,c),f(Ns,t,c),p(t,Jv,c),p(t,Al,c),e(Al,Q8),p(t,Xv,c),f(zs,t,c),p(t,Kv,c),p(t,Ls,c),e(Ls,Df),e(Df,eD),e(Ls,tD),p(t,Qv,c),f(Fs,t,c),p(t,e1,c),p(t,Dl,c),e(Dl,oD),p(t,t1,c),f(Rs,t,c),p(t,o1,c),p(t,ql,c),e(ql,rD),p(t,r1,c),p(t,Lr,c),e(Lr,aD),e(Lr,Ws),e(Ws,nD),e(Lr,sD),p(t,a1,c),p(t,Gs,c),e(Gs,qf),e(qf,iD),e(Gs,lD),p(t,n1,c),p(t,Rt,c),e(Rt,dD),e(Rt,Pf),e(Pf,cD),e(Rt,pD),e(Rt,Sf),e(Sf,mD),e(Rt,hD),p(t,s1,c),p(t,Fr,c),e(Fr,uD),e(Fr,Of),e(Of,fD),e(Fr,gD),p(t,i1,c),f(js,t,c),p(t,l1,c),p(t,Pl,c),e(Pl,_D),p(t,d1,c),p(t,Ge,c),e(Ge,Cf),e(Cf,vD),e(Ge,bD),e(Ge,If),e(If,yD),e(Ge,wD),e(Ge,Ms),e(Ms,ED),e(Ms,Uf),e(Uf,TD),e(Ms,$D),e(Ge,kD),e(Ge,Vs),e(Vs,xD),e(Vs,Nf),e(Nf,AD),e(Vs,DD),p(t,c1,c),p(t,Hs,c),e(Hs,rt),e(rt,qD),e(rt,zf),e(zf,PD),e(rt,SD),e(rt,Lf),e(Lf,OD),e(rt,CD),e(rt,Ff),e(Ff,ID),e(rt,UD),p(t,p1,c),p(t,Rr,c),e(Rr,ND),e(Rr,Rf),e(Rf,zD),e(Rr,LD),p(t,m1,c),f(Bs,t,c),p(t,h1,c),p(t,Co,c),e(Co,Wf),e(Wf,FD),e(Co,RD),e(Co,Gf),e(Gf,WD),e(Co,GD),p(t,u1,c),p(t,Wt,c),e(Wt,jD),e(Wt,jf),e(jf,MD),e(Wt,VD),e(Wt,Mf),e(Mf,HD),e(Wt,BD),p(t,f1,c),p(t,Sl,c),e(Sl,YD),p(t,g1,c),p(t,he,c),e(he,Vf),e(Vf,ZD),e(he,JD),e(he,Hf),e(Hf,XD),e(he,KD),e(he,Ys),e(Ys,QD),e(Ys,Bf),e(Bf,e7),e(Ys,t7),e(he,o7),e(he,Io),e(Io,r7),e(Io,Yf),e(Yf,a7),e(Io,n7),e(Io,Zf),e(Zf,s7),
e(Io,i7),e(he,l7),e(he,Jf),e(Jf,d7),p(t,_1,c),p(t,Ol,c),e(Ol,c7),p(t,v1,c),p(t,Wr,c),e(Wr,Uo),e(Uo,p7),e(Uo,Xf),e(Xf,m7),e(Uo,h7),e(Uo,Kf),e(Kf,u7),e(Uo,f7),e(Wr,g7),e(Wr,ke),e(ke,_7),e(ke,Qf),e(Qf,v7),e(ke,b7),e(ke,eg),e(eg,y7),e(ke,w7),e(ke,tg),e(tg,E7),e(ke,T7),e(ke,og),e(og,$7),e(ke,k7),p(t,b1,c),p(t,Cl,c),e(Cl,x7),p(t,y1,c),p(t,y,c),e(y,A7),e(y,Il),e(Il,D7),e(y,rg),e(y,q7),e(y,Ul),e(Ul,P7),e(y,ag),e(y,S7),e(y,Nl),e(Nl,O7),e(y,ng),e(y,C7),e(y,zl),e(zl,I7),e(y,sg),e(y,U7),e(y,Ll),e(Ll,N7),e(y,ig),e(y,z7),e(y,Fl),e(Fl,L7),e(y,lg),e(y,F7),e(y,Rl),e(Rl,R7),e(y,dg),e(y,W7),e(y,Wl),e(Wl,G7),e(y,cg),e(y,j7),e(y,Gl),e(Gl,M7),e(y,pg),e(y,V7),e(y,jl),e(jl,H7),e(y,mg),e(y,B7),e(y,Ml),e(Ml,Y7),e(y,hg),e(y,Z7),e(y,Vl),e(Vl,J7),e(y,ug),e(y,X7),e(y,Hl),e(Hl,K7),e(y,fg),e(y,Q7),e(y,Bl),e(Bl,eq),e(y,gg),e(y,tq),e(y,Yl),e(Yl,oq),e(y,_g),e(y,rq),e(y,Zl),e(Zl,aq),e(y,vg),e(y,nq),e(y,Jl),e(Jl,sq),e(y,bg),e(y,iq),e(y,Xl),e(Xl,lq),e(y,yg),e(y,dq),e(y,Kl),e(Kl,cq),e(y,wg),e(y,pq),e(y,Ql),e(Ql,mq),e(y,Eg),e(y,hq),e(y,ed),e(ed,uq),e(y,Tg),e(y,fq),e(y,td),e(td,gq),e(y,$g),e(y,_q),e(y,od),e(od,vq),e(y,kg),e(y,bq),w1=!0},p(t,[c]){const Zs={};c&2&&(Zs.$$scope={dirty:c,ctx:t}),Ro.$set(Zs);const xg={};c&2&&(xg.$$scope={dirty:c,ctx:t}),Zo.$set(xg);const Ag={};c&2&&(Ag.$$scope={dirty:c,ctx:t}),er.$set(Ag);const 
Dg={};c&2&&(Dg.$$scope={dirty:c,ctx:t}),nr.$set(Dg)},i(t){w1||(g(R.$$.fragment,t),g(Ro.$$.fragment,t),g(pa.$$.fragment,t),g(ma.$$.fragment,t),g(ha.$$.fragment,t),g(ua.$$.fragment,t),g(ga.$$.fragment,t),g(va.$$.fragment,t),g(ba.$$.fragment,t),g(wa.$$.fragment,t),g(Ea.$$.fragment,t),g(Ta.$$.fragment,t),g(ka.$$.fragment,t),g(xa.$$.fragment,t),g(Da.$$.fragment,t),g(Pa.$$.fragment,t),g(Sa.$$.fragment,t),g(Ca.$$.fragment,t),g(Ua.$$.fragment,t),g(Zo.$$.fragment,t),g(Na.$$.fragment,t),g(La.$$.fragment,t),g(Fa.$$.fragment,t),g(Wa.$$.fragment,t),g(ja.$$.fragment,t),g(Ma.$$.fragment,t),g(Ba.$$.fragment,t),g(Ya.$$.fragment,t),g(Ja.$$.fragment,t),g(Qa.$$.fragment,t),g(er.$$.fragment,t),g(on.$$.fragment,t),g(rn.$$.fragment,t),g(an.$$.fragment,t),g(nn.$$.fragment,t),g(ln.$$.fragment,t),g(pn.$$.fragment,t),g(hn.$$.fragment,t),g(un.$$.fragment,t),g(fn.$$.fragment,t),g(gn.$$.fragment,t),g(_n.$$.fragment,t),g(vn.$$.fragment,t),g(yn.$$.fragment,t),g(nr.$$.fragment,t),g(Tn.$$.fragment,t),g($n.$$.fragment,t),g(An.$$.fragment,t),g(qn.$$.fragment,t),g(Pn.$$.fragment,t),g(Sn.$$.fragment,t),g(Cn.$$.fragment,t),g(In.$$.fragment,t),g(Un.$$.fragment,t),g(Nn.$$.fragment,t),g(Fn.$$.fragment,t),g(Rn.$$.fragment,t),g(Wn.$$.fragment,t),g(Gn.$$.fragment,t),g(jn.$$.fragment,t),g(Mn.$$.fragment,t),g(Vn.$$.fragment,t),g(Bn.$$.fragment,t),g(Zn.$$.fragment,t),g(Kn.$$.fragment,t),g(Qn.$$.fragment,t),g(es.$$.fragment,t),g(ts.$$.fragment,t),g(rs.$$.fragment,t),g(as.$$.fragment,t),g(ns.$$.fragment,t),g(is.$$.fragment,t),g(ds.$$.fragment,t),g(cs.$$.fragment,t),g(ps.$$.fragment,t),g(fs.$$.fragment,t),g(vs.$$.fragment,t),g(bs.$$.fragment,t),g(ys.$$.fragment,t),g(Es.$$.fragment,t),g(Ts.$$.fragment,t),g($s.$$.fragment,t),g(ks.$$.fragment,t),g(xs.$$.fragment,t),g(As.$$.fragment,t),g(Ds.$$.fragment,t),g(Os.$$.fragment,t),g(Cs.$$.fragment,t),g(Ns.$$.fragment,t),g(zs.$$.fragment,t),g(Fs.$$.fragment,t),g(Rs.$$.fragment,t),g(js.$$.fragment,t),g(Bs.$$.fragment,t),w1=!0)},o(t){_(R.$$.fragment,t),_(Ro.$$.fragment,t),_(pa.$
$.fragment,t),_(ma.$$.fragment,t),_(ha.$$.fragment,t),_(ua.$$.fragment,t),_(ga.$$.fragment,t),_(va.$$.fragment,t),_(ba.$$.fragment,t),_(wa.$$.fragment,t),_(Ea.$$.fragment,t),_(Ta.$$.fragment,t),_(ka.$$.fragment,t),_(xa.$$.fragment,t),_(Da.$$.fragment,t),_(Pa.$$.fragment,t),_(Sa.$$.fragment,t),_(Ca.$$.fragment,t),_(Ua.$$.fragment,t),_(Zo.$$.fragment,t),_(Na.$$.fragment,t),_(La.$$.fragment,t),_(Fa.$$.fragment,t),_(Wa.$$.fragment,t),_(ja.$$.fragment,t),_(Ma.$$.fragment,t),_(Ba.$$.fragment,t),_(Ya.$$.fragment,t),_(Ja.$$.fragment,t),_(Qa.$$.fragment,t),_(er.$$.fragment,t),_(on.$$.fragment,t),_(rn.$$.fragment,t),_(an.$$.fragment,t),_(nn.$$.fragment,t),_(ln.$$.fragment,t),_(pn.$$.fragment,t),_(hn.$$.fragment,t),_(un.$$.fragment,t),_(fn.$$.fragment,t),_(gn.$$.fragment,t),_(_n.$$.fragment,t),_(vn.$$.fragment,t),_(yn.$$.fragment,t),_(nr.$$.fragment,t),_(Tn.$$.fragment,t),_($n.$$.fragment,t),_(An.$$.fragment,t),_(qn.$$.fragment,t),_(Pn.$$.fragment,t),_(Sn.$$.fragment,t),_(Cn.$$.fragment,t),_(In.$$.fragment,t),_(Un.$$.fragment,t),_(Nn.$$.fragment,t),_(Fn.$$.fragment,t),_(Rn.$$.fragment,t),_(Wn.$$.fragment,t),_(Gn.$$.fragment,t),_(jn.$$.fragment,t),_(Mn.$$.fragment,t),_(Vn.$$.fragment,t),_(Bn.$$.fragment,t),_(Zn.$$.fragment,t),_(Kn.$$.fragment,t),_(Qn.$$.fragment,t),_(es.$$.fragment,t),_(ts.$$.fragment,t),_(rs.$$.fragment,t),_(as.$$.fragment,t),_(ns.$$.fragment,t),_(is.$$.fragment,t),_(ds.$$.fragment,t),_(cs.$$.fragment,t),_(ps.$$.fragment,t),_(fs.$$.fragment,t),_(vs.$$.fragment,t),_(bs.$$.fragment,t),_(ys.$$.fragment,t),_(Es.$$.fragment,t),_(Ts.$$.fragment,t),_($s.$$.fragment,t),_(ks.$$.fragment,t),_(xs.$$.fragment,t),_(As.$$.fragment,t),_(Ds.$$.fragment,t),_(Os.$$.fragment,t),_(Cs.$$.fragment,t),_(Ns.$$.fragment,t),_(zs.$$.fragment,t),_(Fs.$$.fragment,t),_(Rs.$$.fragment,t),_(js.$$.fragment,t),_(Bs.$$.fragment,t),w1=!1},d(t){o(T),t&&o(L),t&&o(x),v(R),t&&o(ee),t&&o(G),t&&o(N),t&&o(C),t&&o(Je),t&&o(Ae),t&&o(Z),t&&o(H),t&&o(Kt),t&&o(D),t&&o(Og),v(Ro,t),t&&o(Cg),t&&o(Wo),t&&o(Ig),
v(pa,t),t&&o(Ug),t&&o(lt),t&&o(Ng),t&&o(Qt),v(ma),t&&o(zg),t&&o(b),v(ha),v(ua),v(ga),v(va),v(ba),v(wa),v(Ea),v(Ta),v(ka),v(xa),v(Da),v(Pa),v(Sa),v(Ca),v(Ua),v(Zo),v(Na),v(La),v(Fa),v(Wa),v(ja),v(Ma),v(Ba),v(Ya),v(Ja),v(Qa),v(er),v(on),v(rn),v(an),v(nn),v(ln),v(pn),v(hn),v(un),v(fn),t&&o(Lg),t&&o(po),v(gn),t&&o(Fg),t&&o(tt),v(_n),v(vn),v(yn),v(nr),t&&o(Rg),t&&o(go),v(Tn),t&&o(Wg),t&&o(z),v($n),v(An),v(qn),v(Pn),v(Sn),v(Cn),v(In),t&&o(Gg),t&&o(wo),v(Un),t&&o(jg),t&&o(ae),v(Nn),t&&o(Mg),t&&o(To),v(Fn),t&&o(Vg),t&&o(pe),t&&o(Hg),t&&o(hr),t&&o(Bg),t&&o(ur),t&&o(Yg),t&&o(Ie),t&&o(Zg),t&&o(fr),t&&o(Jg),t&&o($o),v(Rn),t&&o(Xg),t&&o(Ue),t&&o(Kg),t&&o(xt),t&&o(Qg),t&&o(vr),t&&o(e_),t&&o(Ne),t&&o(t_),t&&o(Q),t&&o(o_),t&&o(Wi),t&&o(r_),v(Wn,t),t&&o(a_),t&&o(Gi),t&&o(n_),v(Gn,t),t&&o(s_),t&&o(ji),t&&o(i_),v(jn,t),t&&o(l_),t&&o(Mi),t&&o(d_),t&&o(Vi),t&&o(c_),v(Mn,t),t&&o(p_),t&&o(br),t&&o(m_),t&&o(ko),v(Vn),t&&o(h_),t&&o(me),t&&o(u_),t&&o(At),t&&o(f_),t&&o(xo),v(Bn),t&&o(g_),t&&o(Bi),t&&o(__),t&&o(Er),t&&o(v_),v(Zn,t),t&&o(b_),t&&o(Dt),t&&o(y_),v(Kn,t),t&&o(w_),v(Qn,t),t&&o(E_),t&&o(Tr),t&&o(T_),t&&o(Zi),t&&o($_),t&&o(Ji),t&&o(k_),t&&o(Xi),t&&o(x_),t&&o($r),t&&o(A_),t&&o(Ki),t&&o(D_),v(es,t),t&&o(q_),t&&o(qt),t&&o(P_),t&&o(Qi),t&&o(S_),v(ts,t),t&&o(O_),t&&o(Pt),t&&o(C_),t&&o(St),t&&o(I_),v(rs,t),t&&o(U_),t&&o(el),t&&o(N_),v(as,t),t&&o(z_),t&&o(tl),t&&o(L_),v(ns,t),t&&o(F_),t&&o(ol),t&&o(R_),t&&o(rl),t&&o(W_),t&&o(kr),t&&o(G_),t&&o(al),t&&o(j_),v(is,t),t&&o(M_),t&&o(ls),t&&o(V_),v(ds,t),t&&o(H_),t&&o(Ot),t&&o(B_),t&&o(nl),t&&o(Y_),v(cs,t),t&&o(Z_),t&&o(sl),t&&o(J_),t&&o(xr),t&&o(X_),t&&o(Ao),v(ps),t&&o(K_),t&&o(Dr),t&&o(Q_),t&&o(ze),t&&o(ev),t&&o(ll),t&&o(tv),t&&o(dl),t&&o(ov),t&&o(Do),v(fs),t&&o(rv),t&&o(cl),t&&o(av),t&&o(Ct),t&&o(nv),t&&o(pl),t&&o(sv),v(vs,t),t&&o(iv),t&&o(ml),t&&o(lv),t&&o(It),t&&o(dv),t&&o(qo),v(bs),t&&o(cv),t&&o(hl),t&&o(pv),t&&o(Le),t&&o(mv),t&&o(Ut),t&&o(hv),v(ys,t),t&&o(uv),t&&o(Sr),t&&o(fv),t&&o(Po),v(Es),t&&o(gv),t&&o(ul),t&&o(_v),v(Ts,t),t&&o(vv),t&&o(N
t),t&&o(bv),t&&o(fl),t&&o(yv),v($s,t),t&&o(wv),t&&o(gl),t&&o(Ev),t&&o(Cr),t&&o(Tv),t&&o(ot),t&&o($v),t&&o(_l),t&&o(kv),v(ks,t),t&&o(xv),t&&o(vl),t&&o(Av),t&&o(Fe),t&&o(Dv),t&&o(So),v(xs),t&&o(qv),t&&o(zt),t&&o(Pv),t&&o(bl),t&&o(Sv),t&&o(yl),t&&o(Ov),t&&o(Ur),t&&o(Cv),v(As,t),t&&o(Iv),t&&o(j),t&&o(Uv),t&&o(wl),t&&o(Nv),t&&o(Oo),v(Ds),t&&o(zv),t&&o(Re),t&&o(Lv),t&&o(We),t&&o(Fv),t&&o(Tl),t&&o(Rv),t&&o(Ss),t&&o(Wv),t&&o($l),t&&o(Gv),v(Os,t),t&&o(jv),t&&o(Lt),t&&o(Mv),v(Cs,t),t&&o(Vv),t&&o(Ft),t&&o(Hv),t&&o(zr),t&&o(Bv),t&&o(xl),t&&o(Yv),t&&o(Us),t&&o(Zv),v(Ns,t),t&&o(Jv),t&&o(Al),t&&o(Xv),v(zs,t),t&&o(Kv),t&&o(Ls),t&&o(Qv),v(Fs,t),t&&o(e1),t&&o(Dl),t&&o(t1),v(Rs,t),t&&o(o1),t&&o(ql),t&&o(r1),t&&o(Lr),t&&o(a1),t&&o(Gs),t&&o(n1),t&&o(Rt),t&&o(s1),t&&o(Fr),t&&o(i1),v(js,t),t&&o(l1),t&&o(Pl),t&&o(d1),t&&o(Ge),t&&o(c1),t&&o(Hs),t&&o(p1),t&&o(Rr),t&&o(m1),v(Bs,t),t&&o(h1),t&&o(Co),t&&o(u1),t&&o(Wt),t&&o(f1),t&&o(Sl),t&&o(g1),t&&o(he),t&&o(_1),t&&o(Ol),t&&o(v1),t&&o(Wr),t&&o(b1),t&&o(Cl),t&&o(y1),t&&o(y)}}}const fL={local:"trainer",sections:[{local:"transformers.Trainer",title:"Trainer"},{local:"transformers.Seq2SeqTrainer",title:"Seq2SeqTrainer"},{local:"transformers.TrainingArguments",title:"TrainingArguments"},{local:"transformers.Seq2SeqTrainingArguments",title:"Seq2SeqTrainingArguments"},{local:"checkpoints",title:"Checkpoints"},{local:"logging",title:"Logging"},{local:"randomness",title:"Randomness"},{local:"specific-gpus-selection",title:"Specific GPUs Selection"},{local:"trainer-integrations",sections:[{local:"cuda-extension-installation-notes",sections:[{local:"possible-problem-1",title:"Possible problem #1"},{local:"possible-problem-2",title:"Possible problem #2"},{local:"possible-problem-3",title:"Possible problem #3"}],title:"CUDA Extension Installation Notes"},{local:"fairscale",title:"FairScale"}],title:"Trainer Integrations"}],title:"Trainer"};function gL(Ye,T,L){let{fw:x}=T;return Ye.$$set=S=>{"fw"in S&&L(0,x=S.fw)},[x]}class TL extends 
sL{constructor(T){super();iL(this,T,gL,uL,lL,{fw:0})}}export{TL as default,fL as metadata};
401
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/main_classes/model.mdx-5e2df875.js
import{S as tw,i as ow,s as nw,e as o,k as l,w as c,t as s,M as rw,c as n,d as t,m as d,a as r,x as p,h as i,b as m,F as e,g as y,y as h,q as f,o as u,B as g}from"../../chunks/vendor-4833417e.js";import{T as fl}from"../../chunks/Tip-fffd6df1.js";import{D as v}from"../../chunks/Docstring-4f315ed9.js";import{C as Q}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as ao}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function aw(S){let w,k;return{c(){w=o("p"),k=s("Passing `use_auth_token=True\u201C is required when you want to use a private model.")},l(x){w=n(x,"P",{});var E=r(w);k=i(E,"Passing `use_auth_token=True\u201C is required when you want to use a private model."),E.forEach(t)},m(x,E){y(x,w,E),e(w,k)},d(x){x&&t(w)}}}function sw(S){let w,k,x,E,W;return{c(){w=o("p"),k=s("Activate the special "),x=o("a"),E=s("\u201Coffline-mode\u201D"),W=s(` to use this method in a firewalled environment.`),this.h()},l(A){w=n(A,"P",{});var L=r(w);k=i(L,"Activate the special "),x=n(L,"A",{href:!0,rel:!0});var de=r(x);E=i(de,"\u201Coffline-mode\u201D"),de.forEach(t),W=i(L,` to use this method in a firewalled environment.`),L.forEach(t),this.h()},h(){m(x,"href","https://huggingface.co/transformers/installation.html#offline-mode"),m(x,"rel","nofollow")},m(A,L){y(A,w,L),e(w,k),e(w,x),e(x,E),e(w,W)},d(A){A&&t(w)}}}function iw(S){let w,k;return{c(){w=o("p"),k=s("This API is experimental and may have some slight breaking changes in the next releases.")},l(x){w=n(x,"P",{});var E=r(w);k=i(E,"This API is experimental and may have some slight breaking changes in the next releases."),E.forEach(t)},m(x,E){y(x,w,E),e(w,k)},d(x){x&&t(w)}}}function lw(S){let w,k,x,E,W;return{c(){w=o("p"),k=s("Passing "),x=o("code"),E=s("use_auth_token=True"),W=s(" is required when you want to use a private model.")},l(A){w=n(A,"P",{});var L=r(w);k=i(L,"Passing "),x=n(L,"CODE",{});var de=r(x);E=i(de,"use_auth_token=True"),de.forEach(t),W=i(L," is required when you want to 
use a private model."),L.forEach(t)},m(A,L){y(A,w,L),e(w,k),e(w,x),e(x,E),e(w,W)},d(A){A&&t(w)}}}function dw(S){let w,k;return{c(){w=o("p"),k=s("This API is experimental and may have some slight breaking changes in the next releases.")},l(x){w=n(x,"P",{});var E=r(w);k=i(E,"This API is experimental and may have some slight breaking changes in the next releases."),E.forEach(t)},m(x,E){y(x,w,E),e(w,k)},d(x){x&&t(w)}}}function mw(S){let w,k,x,E,W,A,L,de,Jd,ul,Z,Qd,sr,Zd,em,ir,tm,om,lr,nm,rm,gl,Le,dr,am,sm,mr,im,lm,_l,Ze,Qr,dm,mm,Zr,cm,bl,z,pm,cr,hm,fm,ea,um,gm,pr,_m,bm,hr,vm,ym,fr,$m,wm,vl,ze,et,ta,so,Tm,oa,xm,yl,T,io,Pm,na,Em,Mm,ur,gr,km,jm,Dm,lo,ra,Fm,qm,aa,Am,Cm,sa,Im,Lm,G,ia,ge,la,zm,Om,_r,Um,Nm,br,Hm,Xm,Bm,mo,_e,da,Vm,Sm,ma,Wm,Gm,ca,Rm,Ym,Km,Oe,tt,pa,Jm,Qm,vr,Zm,ec,tc,ot,ha,oc,nc,fa,rc,ac,sc,nt,ua,ic,lc,ga,dc,mc,cc,_a,rt,ba,pc,hc,va,fc,uc,gc,ya,at,$a,_c,bc,wa,vc,yc,$c,Ta,U,xa,wc,Tc,Pa,xc,Pc,Ea,Ec,Mc,Ma,kc,jc,ka,Dc,Fc,qc,ee,co,Ac,po,Cc,ja,Ic,Lc,zc,Da,Oc,Uc,ho,Nc,F,fo,Hc,Fa,Xc,Bc,Ue,Vc,qa,Sc,Wc,Aa,Gc,Rc,Yc,uo,Kc,Ca,Jc,Qc,Zc,go,ep,Ia,tp,op,np,st,rp,it,ap,La,sp,ip,_o,lp,lt,bo,dp,za,mp,cp,dt,vo,pp,Oa,hp,fp,be,yo,up,Ua,gp,_p,Na,bp,vp,ve,$o,yp,Ha,$p,wp,Xa,Tp,xp,mt,wo,Pp,Ba,Ep,Mp,ct,To,kp,Va,jp,Dp,pt,xo,Fp,Sa,qp,Ap,ye,Po,Cp,Wa,Ip,Lp,ht,zp,$e,Eo,Op,Mo,Up,Ga,Np,Hp,Xp,ko,Bp,Ra,Vp,Sp,Wp,ft,jo,Gp,Do,Rp,Ya,Yp,Kp,Jp,ut,Fo,Qp,Ka,Zp,eh,we,qo,th,Ja,oh,nh,Ao,rh,Qa,ah,sh,$l,yr,wl,Ne,gt,Za,Co,ih,es,lh,Tl,te,dh,ts,mh,ch,os,ph,hh,ns,fh,uh,xl,Io,Pl,Te,gh,rs,_h,bh,as,vh,yh,El,Lo,Ml,_t,$h,ss,wh,Th,kl,zo,jl,$r,xh,Dl,He,bt,is,Oo,Ph,ls,Eh,Fl,j,Uo,Mh,No,kh,ds,jh,Dh,Fh,xe,Ho,qh,ms,Ah,Ch,Xe,Ih,cs,Lh,zh,ps,Oh,Uh,Nh,vt,Xo,Hh,hs,Xh,Bh,yt,Bo,Vh,Be,Sh,fs,Wh,Gh,Vo,Rh,Yh,Kh,$t,So,Jh,us,Qh,Zh,wt,Wo,ef,gs,tf,of,Tt,Go,nf,_s,rf,af,xt,Ro,sf,bs,lf,df,Pt,Yo,mf,Ve,cf,vs,pf,hf,wr,ff,uf,ql,Se,Et,ys,Ko,gf,$s,_f,Al,b,Jo,bf,ws,vf,yf,Tr,xr,$f,wf,Tf,Qo,Ts,xf,Pf,xs,Ef,Mf,Ps,kf,jf,We,Pe,Es,Df,Ff,Pr,qf,Af,Er,Cf,If,Lf,Mt,Ms,zf,Of,ks,Uf,Nf,Hf,N,js,Xf,Bf,Ds,Vf,Sf,Fs,Wf,Gf,qs,Rf,Yf,As,Kf,Jf,Qf,oe,Zo,Zf,en,eu,Cs,tu,ou,nu,Is,r
u,au,tn,su,kt,on,iu,Ls,lu,du,C,nn,mu,zs,cu,pu,rn,hu,Os,fu,uu,gu,an,_u,Us,bu,vu,yu,jt,$u,Ns,wu,Tu,sn,xu,Dt,ln,Pu,Hs,Eu,Mu,Ft,dn,ku,Xs,ju,Du,qt,mn,Fu,Bs,qu,Au,At,cn,Cu,Vs,Iu,Lu,Ct,pn,zu,Ss,Ou,Uu,It,hn,Nu,Ws,Hu,Xu,Lt,fn,Bu,Gs,Vu,Su,zt,un,Wu,Rs,Gu,Ru,Ee,gn,Yu,_n,Ku,Ys,Ju,Qu,Zu,bn,eg,Ks,tg,og,ng,Ot,vn,rg,yn,ag,Mr,sg,ig,lg,Ut,$n,dg,Js,mg,cg,Nt,wn,pg,Qs,hg,fg,Ht,Tn,ug,Zs,gg,_g,Xt,xn,bg,ei,vg,yg,Bt,Pn,$g,ti,wg,Tg,Vt,En,xg,oi,Pg,Eg,ne,Mn,Mg,R,kg,ni,jg,Dg,ri,Fg,qg,ai,Ag,Cg,si,Ig,Lg,zg,Ge,Og,ii,Ug,Ng,li,Hg,Xg,Bg,Re,Vg,di,Sg,Wg,mi,Gg,Rg,Cl,Ye,St,ci,kn,Yg,pi,Kg,Il,me,jn,Jg,Dn,Qg,hi,Zg,e_,t_,Wt,Fn,o_,fi,n_,Ll,Ke,Gt,ui,qn,r_,gi,a_,zl,M,An,s_,_i,i_,l_,kr,jr,d_,m_,c_,bi,p_,h_,Je,Me,vi,f_,u_,Dr,g_,__,Fr,b_,v_,y_,Rt,yi,$_,w_,$i,T_,x_,P_,H,wi,E_,M_,Ti,k_,j_,xi,D_,F_,Pi,q_,A_,Ei,C_,I_,L_,re,Cn,z_,In,O_,Mi,U_,N_,H_,ki,X_,B_,Ln,V_,O,zn,S_,ji,W_,G_,On,R_,Di,Y_,K_,J_,Un,Q_,Fi,Z_,eb,tb,qi,ob,nb,Nn,rb,ke,Hn,ab,Ai,sb,ib,Yt,lb,Kt,Xn,db,Bn,mb,Ci,cb,pb,hb,X,Vn,fb,Y,ub,Ii,gb,_b,Li,bb,vb,zi,yb,$b,Oi,wb,Tb,xb,Ui,Pb,Eb,Ni,Mb,kb,Sn,jb,B,Wn,Db,K,Fb,Hi,qb,Ab,Xi,Cb,Ib,Bi,Lb,zb,Vi,Ob,Ub,Nb,Si,Hb,Xb,Wi,Bb,Vb,Gn,Sb,ae,Rn,Wb,J,Gb,Gi,Rb,Yb,Ri,Kb,Jb,Yi,Qb,Zb,Ki,ev,tv,ov,Ji,nv,rv,Yn,Ol,Qe,Jt,Qi,Kn,av,Zi,sv,Ul,ce,Jn,iv,el,lv,dv,se,Qn,mv,Zn,cv,tl,pv,hv,fv,ol,uv,gv,er,Nl;return A=new ao({}),so=new ao({}),io=new v({props:{name:"class transformers.PreTrainedModel",anchor:"transformers.PreTrainedModel",parameters:[{name:"config",val:": PretrainedConfig"},{name:"*inputs",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L423"}}),co=new v({props:{name:"push_to_hub",anchor:"transformers.file_utils.PushToHubMixin.push_to_hub",parameters:[{name:"repo_path_or_name",val:": typing.Optional[str] = None"},{name:"repo_url",val:": typing.Optional[str] = None"},{name:"use_temp_dir",val:": bool = False"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"organization",val:": typing.Optional[str] = None"},{name:"private",val:": 
typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"**model_card_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842",parametersDescription:[{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name",description:`<strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your model in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.`,name:"repo_path_or_name"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_url",description:`<strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.`,name:"repo_url"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.`,name:"use_temp_dir"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add model&quot;</code>.`,name:"commit_message"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.organization",description:`<strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your model (you must be a member of this organization).`,name:"organization"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"}],returnDescription:` <p>The url of the commit of your model in the given repository.</p> `,returnType:` <p><code>str</code></p> `}}),ho=new Q({props:{code:`from transformers import AutoModel model = AutoModel.from_pretrained("bert-base-cased") # Push the model to your namespace with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. model.push_to_hub("my-finetuned-bert") # Push the model to your namespace with the name "my-finetuned-bert" with no local clone. model.push_to_hub("my-finetuned-bert", use_temp_dir=True) # Push the model to an organization with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. model.push_to_hub("my-finetuned-bert", organization="huggingface") # Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*. 
model.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel model = AutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)`}}),fo=new v({props:{name:"from_pretrained",anchor:"transformers.PreTrainedModel.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike, 
NoneType]"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1091",parametersDescription:[{anchor:"transformers.PreTrainedModel.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> <li>A path or url to a model folder containing a <em>flax checkpoint file</em> in <em>.msgpack</em> format (e.g, <code>./flax_model/</code> containing <code>flax_model.msgpack</code>). In this case, <code>from_flax</code> should be set to <code>True</code>.</li> <li><code>None</code> if you are both providing the configuration and state dictionary (resp. 
with keyword arguments <code>config</code> and <code>state_dict</code>).</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.PreTrainedModel.from_pretrained.model_args",description:`<strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.PreTrainedModel.from_pretrained.config",description:`<strong>config</strong> (<code>Union[PretrainedConfig, str, os.PathLike]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string or path valid as input to <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul>`,name:"config"}]}}),st=new fl({props:{$$slots:{default:[aw]},$$scope:{ctx:S}}}),it=new fl({props:{$$slots:{default:[sw]},$$scope:{ctx:S}}}),_o=new Q({props:{code:`from transformers import BertConfig, BertModel # Download model and configuration from huggingface.co and cache. model = BertModel.from_pretrained("bert-base-uncased") # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). model = BertModel.from_pretrained("./test/saved_model/") # Update configuration during loading. model = BertModel.from_pretrained("bert-base-uncased", output_attentions=True) assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). 
config = BertConfig.from_json_file("./tf_model/my_tf_model_config.json") model = BertModel.from_pretrained("./tf_model/my_tf_checkpoint.ckpt.index", from_tf=True, config=config) # Loading from a Flax checkpoint file instead of a PyTorch model (slower) model = BertModel.from_pretrained("bert-base-uncased", from_flax=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, BertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_json_file(<span class="hljs-string">&quot;./tf_model/my_tf_model_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
BertModel.from_pretrained(<span class="hljs-string">&quot;./tf_model/my_tf_checkpoint.ckpt.index&quot;</span>, from_tf=<span class="hljs-literal">True</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a Flax checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, from_flax=<span class="hljs-literal">True</span>)`}}),bo=new v({props:{name:"get_input_embeddings",anchor:"transformers.PreTrainedModel.get_input_embeddings",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L575",returnDescription:` <p>A torch module mapping vocabulary to hidden states.</p> `,returnType:` <p><code>nn.Module</code></p> `}}),vo=new v({props:{name:"get_output_embeddings",anchor:"transformers.PreTrainedModel.get_output_embeddings",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L601",returnDescription:` <p>A torch module mapping hidden states to vocabulary.</p> `,returnType:` <p><code>nn.Module</code></p> `}}),yo=new v({props:{name:"gradient_checkpointing_disable",anchor:"transformers.PreTrainedModel.gradient_checkpointing_disable",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L980"}}),$o=new v({props:{name:"gradient_checkpointing_enable",anchor:"transformers.PreTrainedModel.gradient_checkpointing_enable",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L969"}}),wo=new v({props:{name:"init_weights",anchor:"transformers.PreTrainedModel.init_weights",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L936"}}),To=new 
v({props:{name:"post_init",anchor:"transformers.PreTrainedModel.post_init",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L494"}}),xo=new v({props:{name:"prune_heads",anchor:"transformers.PreTrainedModel.prune_heads",parameters:[{name:"heads_to_prune",val:": typing.Dict[int, typing.List[int]]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L952",parametersDescription:[{anchor:"transformers.PreTrainedModel.prune_heads.heads_to_prune",description:`<strong>heads_to_prune</strong> (<code>Dict[int, List[int]]</code>) &#x2014; Dictionary with keys being selected layer indices (<code>int</code>) and associated values being the list of heads to prune in said layer (list of <code>int</code>). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.`,name:"heads_to_prune"}]}}),Po=new v({props:{name:"register_for_auto_class",anchor:"transformers.PreTrainedModel.register_for_auto_class",parameters:[{name:"auto_class",val:" = 'AutoModel'"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1817",parametersDescription:[{anchor:"transformers.PreTrainedModel.register_for_auto_class.auto_class",description:`<strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoModel&quot;</code>) &#x2014; The auto class to register this new model with.`,name:"auto_class"}]}}),ht=new fl({props:{warning:"&lcub;true}",$$slots:{default:[iw]},$$scope:{ctx:S}}}),Eo=new v({props:{name:"resize_token_embeddings",anchor:"transformers.PreTrainedModel.resize_token_embeddings",parameters:[{name:"new_num_tokens",val:": typing.Optional[int] = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L729",parametersDescription:[{anchor:"transformers.PreTrainedModel.resize_token_embeddings.new_num_tokens",description:`<strong>new_num_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or <code>None</code>, just returns a pointer to the input tokens <code>torch.nn.Embedding</code> module of the model without doing anything.`,name:"new_num_tokens"}],returnDescription:` <p>Pointer to the input tokens Embeddings Module of the model.</p> `,returnType:` <p><code>torch.nn.Embedding</code></p> `}}),jo=new v({props:{name:"save_pretrained",anchor:"transformers.PreTrainedModel.save_pretrained",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"save_config",val:": bool = True"},{name:"state_dict",val:": typing.Optional[dict] = None"},{name:"save_function",val:": typing.Callable = <function save at 0x7f461a422820>"},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1000",parametersDescription:[{anchor:"transformers.PreTrainedModel.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory to which to save. Will be created if it doesn&#x2019;t exist.`,name:"save_directory"},{anchor:"transformers.PreTrainedModel.save_pretrained.save_config",description:`<strong>save_config</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to save the config of the model. Useful when in distributed training like TPUs and need to call this function on all processes. 
In this case, set <code>save_config=True</code> only on the main process to avoid race conditions.`,name:"save_config"},{anchor:"transformers.PreTrainedModel.save_pretrained.state_dict",description:`<strong>state_dict</strong> (nested dictionary of <code>torch.Tensor</code>) &#x2014; The state dictionary of the model to save. Will default to <code>self.state_dict()</code>, but can be used to only save parts of the model or if special precautions need to be taken when recovering the state dictionary of a model (like when using model parallelism).`,name:"state_dict"},{anchor:"transformers.PreTrainedModel.save_pretrained.save_function",description:`<strong>save_function</strong> (<code>Callable</code>) &#x2014; The function to use to save the state dictionary. Useful on distributed training like TPUs when one need to replace <code>torch.save</code> by another method.`,name:"save_function"},{anchor:"transformers.PreTrainedModel.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. 
Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}]}}),Fo=new v({props:{name:"set_input_embeddings",anchor:"transformers.PreTrainedModel.set_input_embeddings",parameters:[{name:"value",val:": Module"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L588",parametersDescription:[{anchor:"transformers.PreTrainedModel.set_input_embeddings.value",description:"<strong>value</strong> (<code>nn.Module</code>) &#x2014; A module mapping vocabulary to hidden states.",name:"value"}]}}),qo=new v({props:{name:"tie_weights",anchor:"transformers.PreTrainedModel.tie_weights",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L616"}}),Co=new ao({}),Io=new Q({props:{code:'model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype=torch.float16)',highlighted:'model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;t5&quot;</span>, torch_dtype=torch.float16)'}}),Lo=new Q({props:{code:'model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype="auto")',highlighted:'model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;t5&quot;</span>, torch_dtype=<span class="hljs-string">&quot;auto&quot;</span>)'}}),zo=new Q({props:{code:`config = T5Config.from_pretrained("t5") model = AutoModel.from_config(config)`,highlighted:`config = T5Config.from_pretrained(<span class="hljs-string">&quot;t5&quot;</span>) model = AutoModel.from_config(config)`}}),Oo=new ao({}),Uo=new v({props:{name:"class 
transformers.modeling_utils.ModuleUtilsMixin",anchor:"transformers.modeling_utils.ModuleUtilsMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L151"}}),Ho=new v({props:{name:"add_memory_hooks",anchor:"transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L182"}}),Xo=new v({props:{name:"estimate_tokens",anchor:"transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens",parameters:[{name:"input_dict",val:": typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L378",parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens.inputs",description:"<strong>inputs</strong> (<code>dict</code>) &#x2014; The model inputs.",name:"inputs"}],returnDescription:` <p>The total number of tokens.</p> `,returnType:` <p><code>int</code></p> `}}),Bo=new v({props:{name:"floating_point_ops",anchor:"transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops",parameters:[{name:"input_dict",val:": typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]"},{name:"exclude_embeddings",val:": bool = True"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L396",parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.batch_size",description:`<strong>batch_size</strong> (<code>int</code>) &#x2014; The batch size for the forward pass.`,name:"batch_size"},{anchor:"transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.sequence_length",description:`<strong>sequence_length</strong> (<code>int</code>) &#x2014; The number of tokens in each line of the 
batch.`,name:"sequence_length"},{anchor:"transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.exclude_embeddings",description:`<strong>exclude_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to count embedding and softmax operations.`,name:"exclude_embeddings"}],returnDescription:` <p>The number of floating-point operations.</p> `,returnType:` <p><code>int</code></p> `}}),So=new v({props:{name:"get_extended_attention_mask",anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask",parameters:[{name:"attention_mask",val:": Tensor"},{name:"input_shape",val:": typing.Tuple[int]"},{name:"device",val:": <property object at 0x7f45b4efb950>"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L271",parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask.attention_mask",description:`<strong>attention_mask</strong> (<code>torch.Tensor</code>) &#x2014; Mask with ones indicating tokens to attend to, zeros for tokens to ignore.`,name:"attention_mask"},{anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask.input_shape",description:`<strong>input_shape</strong> (<code>Tuple[int]</code>) &#x2014; The shape of the input to the model. 
device &#x2014; (<code>torch.device</code>): The device of the input to the model.`,name:"input_shape"}],returnDescription:` <p><code>torch.Tensor</code> The extended attention mask, with a the same dtype as <code>attention_mask.dtype</code>.</p> `}}),Wo=new v({props:{name:"get_head_mask",anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_head_mask",parameters:[{name:"head_mask",val:": typing.Optional[torch.Tensor]"},{name:"num_hidden_layers",val:": int"},{name:"is_attention_chunked",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L314",parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_head_mask.head_mask",description:`<strong>head_mask</strong> (<code>torch.Tensor</code> with shape <code>[num_heads]</code> or <code>[num_hidden_layers x num_heads]</code>, <em>optional</em>) &#x2014; The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).`,name:"head_mask"},{anchor:"transformers.modeling_utils.ModuleUtilsMixin.get_head_mask.num_hidden_layers",description:`<strong>num_hidden_layers</strong> (<code>int</code>) &#x2014; The number of hidden layers in the model. 
is_attention_chunked &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether or not the attentions scores are computed by chunks or not.`,name:"num_hidden_layers"}],returnDescription:` <p><code>torch.Tensor</code> with shape <code>[num_hidden_layers x batch x num_heads x seq_length x seq_length]</code> or list with <code>[None]</code> for each layer.</p> `}}),Go=new v({props:{name:"invert_attention_mask",anchor:"transformers.modeling_utils.ModuleUtilsMixin.invert_attention_mask",parameters:[{name:"encoder_attention_mask",val:": Tensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L218",parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.invert_attention_mask.encoder_attention_mask",description:"<strong>encoder_attention_mask</strong> (<code>torch.Tensor</code>) &#x2014; An attention mask.",name:"encoder_attention_mask"}],returnDescription:` <p>The inverted attention mask.</p> `,returnType:` <p><code>torch.Tensor</code></p> `}}),Ro=new v({props:{name:"num_parameters",anchor:"transformers.modeling_utils.ModuleUtilsMixin.num_parameters",parameters:[{name:"only_trainable",val:": bool = False"},{name:"exclude_embeddings",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L352",parametersDescription:[{anchor:"transformers.modeling_utils.ModuleUtilsMixin.num_parameters.only_trainable",description:`<strong>only_trainable</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return only the number of trainable parameters`,name:"only_trainable"},{anchor:"transformers.modeling_utils.ModuleUtilsMixin.num_parameters.exclude_embeddings",description:`<strong>exclude_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return only the number of non-embeddings 
parameters`,name:"exclude_embeddings"}],returnDescription:` <p>The number of parameters.</p> `,returnType:` <p><code>int</code></p> `}}),Yo=new v({props:{name:"reset_memory_hooks_state",anchor:"transformers.modeling_utils.ModuleUtilsMixin.reset_memory_hooks_state",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L194"}}),Ko=new ao({}),Jo=new v({props:{name:"class transformers.TFPreTrainedModel",anchor:"transformers.TFPreTrainedModel",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L686"}}),Zo=new v({props:{name:"push_to_hub",anchor:"transformers.file_utils.PushToHubMixin.push_to_hub",parameters:[{name:"repo_path_or_name",val:": typing.Optional[str] = None"},{name:"repo_url",val:": typing.Optional[str] = None"},{name:"use_temp_dir",val:": bool = False"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"organization",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"**model_card_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842",parametersDescription:[{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name",description:`<strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your model in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). 
If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.`,name:"repo_path_or_name"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_url",description:`<strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.`,name:"repo_url"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.`,name:"use_temp_dir"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add model&quot;</code>.`,name:"commit_message"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.organization",description:`<strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your model (you must be a member of this organization).`,name:"organization"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"}],returnDescription:` <p>The url of the commit of your model in the given repository.</p> `,returnType:` <p><code>str</code></p> `}}),tn=new Q({props:{code:`from transformers import TFAutoModel model = TFAutoModel.from_pretrained("bert-base-cased") # Push the model to your namespace with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. model.push_to_hub("my-finetuned-bert") # Push the model to your namespace with the name "my-finetuned-bert" with no local clone. model.push_to_hub("my-finetuned-bert", use_temp_dir=True) # Push the model to an organization with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. model.push_to_hub("my-finetuned-bert", organization="huggingface") # Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*. 
model.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModel model = TFAutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)`}}),on=new v({props:{name:"compile",anchor:"transformers.TFPreTrainedModel.compile",parameters:[{name:"optimizer",val:" = 'rmsprop'"},{name:"loss",val:" = 'passthrough'"},{name:"metrics",val:" = None"},{name:"loss_weights",val:" = None"},{name:"weighted_metrics",val:" = None"},{name:"run_eagerly",val:" = None"},{name:"steps_per_execution",val:" = 
None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L877"}}),nn=new v({props:{name:"from_pretrained",anchor:"transformers.TFPreTrainedModel.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1439",parametersDescription:[{anchor:"transformers.TFPreTrainedModel.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <code>./pt_model/pytorch_model.bin</code>). In this case, <code>from_pt</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> <li><code>None</code> if you are both providing the configuration and state dictionary (resp. 
with keyword arguments <code>config</code> and <code>state_dict</code>).</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.model_args",description:`<strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.TFPreTrainedModel.from_pretrained.config",description:`<strong>config</strong> (<code>Union[PretrainedConfig, str]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string valid as input to <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul>`,name:"config"}]}}),jt=new fl({props:{$$slots:{default:[lw]},$$scope:{ctx:S}}}),sn=new Q({props:{code:`from transformers import BertConfig, TFBertModel # Download model and configuration from huggingface.co and cache. model = TFBertModel.from_pretrained("bert-base-uncased") # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). model = TFBertModel.from_pretrained("./test/saved_model/") # Update configuration during loading. model = TFBertModel.from_pretrained("bert-base-uncased", output_attentions=True) assert model.config.output_attentions == True # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable). 
config = BertConfig.from_json_file("./pt_model/my_pt_model_config.json") model = TFBertModel.from_pretrained("./pt_model/my_pytorch_model.bin", from_pt=True, config=config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, TFBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_json_file(<span class="hljs-string">&quot;./pt_model/my_pt_model_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;./pt_model/my_pytorch_model.bin&quot;</span>, from_pt=<span 
class="hljs-literal">True</span>, config=config)`}}),ln=new v({props:{name:"get_bias",anchor:"transformers.TFPreTrainedModel.get_bias",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1123",returnDescription:` <p>The weights representing the bias, None if not an LM model.</p> `,returnType:` <p><code>tf.Variable</code></p> `}}),dn=new v({props:{name:"get_input_embeddings",anchor:"transformers.TFPreTrainedModel.get_input_embeddings",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L796",returnDescription:` <p>The embeddings layer mapping vocabulary to hidden states.</p> `,returnType:` <p><code>tf.Variable</code></p> `}}),mn=new v({props:{name:"get_lm_head",anchor:"transformers.TFPreTrainedModel.get_lm_head",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1156",returnDescription:` <p>The LM head layer if the model has one, None if not.</p> `,returnType:` <p><code>tf.keras.layers.Layer</code></p> `}}),cn=new v({props:{name:"get_output_embeddings",anchor:"transformers.TFPreTrainedModel.get_output_embeddings",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1063",returnDescription:` <p>The new weights mapping vocabulary to hidden states.</p> `,returnType:` <p><code>tf.Variable</code></p> `}}),pn=new v({props:{name:"get_output_layer_with_bias",anchor:"transformers.TFPreTrainedModel.get_output_layer_with_bias",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1100",returnDescription:` <p>The layer that handles the bias, None if not an LM model.</p> `,returnType:` <p><code>tf.keras.layers.Layer</code></p> `}}),hn=new 
v({props:{name:"get_prefix_bias_name",anchor:"transformers.TFPreTrainedModel.get_prefix_bias_name",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1113",returnDescription:` <p>The _prefix name of the bias.</p> `,returnType:` <p><code>str</code></p> `}}),fn=new v({props:{name:"load_repo_checkpoint",anchor:"transformers.TFPreTrainedModel.load_repo_checkpoint",parameters:[{name:"repo_path_or_name",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L823",parametersDescription:[{anchor:"transformers.TFPreTrainedModel.load_repo_checkpoint.repo_path_or_name",description:`<strong>repo_path_or_name</strong> (<code>str</code>) &#x2014; Can either be a repository name for your {object} in the Hub or a path to a local folder (in which case the repository will have the name of that local folder).`,name:"repo_path_or_name"}],returnDescription:` <p>A dictionary of extra metadata from the checkpoint, most commonly an \u201Cepoch\u201D count.</p> `,returnType:` <p><code>dict</code></p> `}}),un=new v({props:{name:"prune_heads",anchor:"transformers.TFPreTrainedModel.prune_heads",parameters:[{name:"heads_to_prune",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1365",parametersDescription:[{anchor:"transformers.TFPreTrainedModel.prune_heads.heads_to_prune",description:`<strong>heads_to_prune</strong> (<code>Dict[int, List[int]]</code>) &#x2014; Dictionary with keys being selected layer indices (<code>int</code>) and associated values being the list of heads to prune in said layer (list of <code>int</code>). 
For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.`,name:"heads_to_prune"}]}}),gn=new v({props:{name:"resize_token_embeddings",anchor:"transformers.TFPreTrainedModel.resize_token_embeddings",parameters:[{name:"new_num_tokens",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1165",parametersDescription:[{anchor:"transformers.TFPreTrainedModel.resize_token_embeddings.new_num_tokens",description:`<strong>new_num_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or <code>None</code>, just returns a pointer to the input tokens <code>tf.Variable</code> module of the model without doing anything.`,name:"new_num_tokens"}],returnDescription:` <p>Pointer to the input tokens Embeddings Module of the model.</p> `,returnType:` <p><code>tf.Variable</code></p> `}}),vn=new v({props:{name:"save_pretrained",anchor:"transformers.TFPreTrainedModel.save_pretrained",parameters:[{name:"save_directory",val:""},{name:"saved_model",val:" = False"},{name:"version",val:" = 1"},{name:"push_to_hub",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1377",parametersDescription:[{anchor:"transformers.TFPreTrainedModel.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code>) &#x2014; Directory to which to save. 
Will be created if it doesn&#x2019;t exist.`,name:"save_directory"},{anchor:"transformers.TFPreTrainedModel.save_pretrained.saved_model",description:`<strong>saved_model</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If the model has to be saved in saved model format as well or not.`,name:"saved_model"},{anchor:"transformers.TFPreTrainedModel.save_pretrained.version",description:`<strong>version</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The version of the saved model. A saved model needs to be versioned in order to be properly loaded by TensorFlow Serving as detailed in the official documentation <a href="https://www.tensorflow.org/tfx/serving/serving_basic" rel="nofollow">https://www.tensorflow.org/tfx/serving/serving_basic</a>`,name:"version"},{anchor:"transformers.TFPreTrainedModel.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. 
Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}]}}),$n=new v({props:{name:"serving",anchor:"transformers.TFPreTrainedModel.serving",parameters:[{name:"inputs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L765",parametersDescription:[{anchor:"transformers.TFPreTrainedModel.serving.inputs",description:`<strong>inputs</strong> (<code>Dict[str, tf.Tensor]</code>) &#x2014; The input of the saved model as a dictionary of tensors.`,name:"inputs"}]}}),wn=new v({props:{name:"serving_output",anchor:"transformers.TFPreTrainedModel.serving_output",parameters:[{name:"output",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L786",parametersDescription:[{anchor:"transformers.TFPreTrainedModel.serving_output.output",description:`<strong>output</strong> (<code>TFBaseModelOutput</code>) &#x2014; The output returned by the model.`,name:"output"}]}}),Tn=new v({props:{name:"set_bias",anchor:"transformers.TFPreTrainedModel.set_bias",parameters:[{name:"value",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1140",parametersDescription:[{anchor:"transformers.TFPreTrainedModel.set_bias.value",description:`<strong>value</strong> (<code>Dict[tf.Variable]</code>) &#x2014; All the new bias attached to an LM head.`,name:"value"}]}}),xn=new 
v({props:{name:"set_input_embeddings",anchor:"transformers.TFPreTrainedModel.set_input_embeddings",parameters:[{name:"value",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1043",parametersDescription:[{anchor:"transformers.TFPreTrainedModel.set_input_embeddings.value",description:`<strong>value</strong> (<code>tf.Variable</code>) &#x2014; The new weights mapping hidden states to vocabulary.`,name:"value"}]}}),Pn=new v({props:{name:"set_output_embeddings",anchor:"transformers.TFPreTrainedModel.set_output_embeddings",parameters:[{name:"value",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1083",parametersDescription:[{anchor:"transformers.TFPreTrainedModel.set_output_embeddings.value",description:`<strong>value</strong> (<code>tf.Variable</code>) &#x2014; The new weights mapping hidden states to vocabulary.`,name:"value"}]}}),En=new v({props:{name:"test_step",anchor:"transformers.TFPreTrainedModel.test_step",parameters:[{name:"data",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L977"}}),Mn=new v({props:{name:"train_step",anchor:"transformers.TFPreTrainedModel.train_step",parameters:[{name:"data",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L926"}}),kn=new ao({}),jn=new v({props:{name:"class transformers.modeling_tf_utils.TFModelUtilsMixin",anchor:"transformers.modeling_tf_utils.TFModelUtilsMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L83"}}),Fn=new v({props:{name:"num_parameters",anchor:"transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters",parameters:[{name:"only_trainable",val:": bool = 
False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L88",parametersDescription:[{anchor:"transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters.only_trainable",description:`<strong>only_trainable</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return only the number of trainable parameters`,name:"only_trainable"}],returnDescription:` <p>The number of parameters.</p> `,returnType:` <p><code>int</code></p> `}}),qn=new ao({}),An=new v({props:{name:"class transformers.FlaxPreTrainedModel",anchor:"transformers.FlaxPreTrainedModel",parameters:[{name:"config",val:": PretrainedConfig"},{name:"module",val:": Module"},{name:"input_shape",val:": typing.Tuple = (1, 1)"},{name:"seed",val:": int = 0"},{name:"dtype",val:": dtype = <class 'jax._src.numpy.lax_numpy.float32'>"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L72"}}),Cn=new v({props:{name:"push_to_hub",anchor:"transformers.file_utils.PushToHubMixin.push_to_hub",parameters:[{name:"repo_path_or_name",val:": typing.Optional[str] = None"},{name:"repo_url",val:": typing.Optional[str] = None"},{name:"use_temp_dir",val:": bool = False"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"organization",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"**model_card_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842",parametersDescription:[{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name",description:`<strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your model in the Hub or a path to a local folder (in which case the repository will have the 
name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.`,name:"repo_path_or_name"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_url",description:`<strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.`,name:"repo_url"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.`,name:"use_temp_dir"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add model&quot;</code>.`,name:"commit_message"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.organization",description:`<strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your model (you must be a member of this organization).`,name:"organization"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"}],returnDescription:` <p>The url of the commit of your model in the given repository.</p> `,returnType:` <p><code>str</code></p> `}}),Ln=new Q({props:{code:`from transformers import FlaxAutoModel model = FlaxAutoModel.from_pretrained("bert-base-cased") # Push the model to your namespace with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. model.push_to_hub("my-finetuned-bert") # Push the model to your namespace with the name "my-finetuned-bert" with no local clone. model.push_to_hub("my-finetuned-bert", use_temp_dir=True) # Push the model to an organization with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. model.push_to_hub("my-finetuned-bert", organization="huggingface") # Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*. 
model.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxAutoModel model = FlaxAutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)`}}),zn=new v({props:{name:"from_pretrained",anchor:"transformers.FlaxPreTrainedModel.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike]"},{name:"dtype",val:": dtype = <class 
'jax._src.numpy.lax_numpy.float32'>"},{name:"*model_args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L296",parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>pt index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_pt</code> should be set to <code>True</code>.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.dtype",description:`<strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.`,name:"dtype"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.model_args",description:`<strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.`,name:"model_args"},{anchor:"transformers.FlaxPreTrainedModel.from_pretrained.config",description:`<strong>config</strong> (<code>Union[PretrainedConfig, str, os.PathLike]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string or path valid as input to <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul>`,name:"config"}]}}),Nn=new Q({props:{code:`from transformers import BertConfig, FlaxBertModel # Download model and configuration from huggingface.co and cache. model = FlaxBertModel.from_pretrained("bert-base-cased") # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). model = FlaxBertModel.from_pretrained("./test/saved_model/") # Loading from a PyTorch checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). 
config = BertConfig.from_json_file("./pt_model/config.json") model = FlaxBertModel.from_pretrained("./pt_model/pytorch_model.bin", from_pt=True, config=config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_json_file(<span class="hljs-string">&quot;./pt_model/config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;./pt_model/pytorch_model.bin&quot;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)`}}),Hn=new v({props:{name:"register_for_auto_class",anchor:"transformers.FlaxPreTrainedModel.register_for_auto_class",parameters:[{name:"auto_class",val:" = 'FlaxAutoModel'"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L716",parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.register_for_auto_class.auto_class",description:`<strong>auto_class</strong> (<code>str</code> or <code>type</code>, 
<em>optional</em>, defaults to <code>&quot;FlaxAutoModel&quot;</code>) &#x2014; The auto class to register this new model with.`,name:"auto_class"}]}}),Yt=new fl({props:{warning:"&lcub;true}",$$slots:{default:[dw]},$$scope:{ctx:S}}}),Xn=new v({props:{name:"save_pretrained",anchor:"transformers.FlaxPreTrainedModel.save_pretrained",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"params",val:" = None"},{name:"push_to_hub",val:" = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L659",parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory to which to save. Will be created if it doesn&#x2019;t exist.`,name:"save_directory"},{anchor:"transformers.FlaxPreTrainedModel.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. 
Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}]}}),Vn=new v({props:{name:"to_bf16",anchor:"transformers.FlaxPreTrainedModel.to_bf16",parameters:[{name:"params",val:": typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]"},{name:"mask",val:": typing.Any = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L191",parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.to_bf16.params",description:`<strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.`,name:"params"},{anchor:"transformers.FlaxPreTrainedModel.to_bf16.mask",description:`<strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. 
The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip.`,name:"mask"}]}}),Sn=new Q({props:{code:`from transformers import FlaxBertModel # load model model = FlaxBertModel.from_pretrained("bert-base-cased") # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision model.params = model.to_bf16(model.params) # If you want don't want to cast certain parameters (for example layer norm bias and scale) # then pass the mask as follows from flax import traverse_util model = FlaxBertModel.from_pretrained("bert-base-cased") flat_params = traverse_util.flatten_dict(model.params) mask = { path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) for path in flat_params } mask = traverse_util.unflatten_dict(mask) model.params = model.to_bf16(model.params, mask)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_bf16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If you want don&#x27;t want to cast certain parameters (for example layer norm bias and scale)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># then pass the mask as follows</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> flax <span class="hljs-keyword">import</span> traverse_util 
<span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>flat_params = traverse_util.flatten_dict(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span>mask = { <span class="hljs-meta">... </span> path: (path[-<span class="hljs-number">2</span>] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;bias&quot;</span>) <span class="hljs-keyword">and</span> path[-<span class="hljs-number">2</span>:] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;scale&quot;</span>)) <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> path <span class="hljs-keyword">in</span> flat_params <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>mask = traverse_util.unflatten_dict(mask) <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_bf16(model.params, mask)`}}),Wn=new v({props:{name:"to_fp16",anchor:"transformers.FlaxPreTrainedModel.to_fp16",parameters:[{name:"params",val:": typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]"},{name:"mask",val:": typing.Any = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L257",parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.to_fp16.params",description:`<strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.`,name:"params"},{anchor:"transformers.FlaxPreTrainedModel.to_fp16.mask",description:`<strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. 
The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip`,name:"mask"}]}}),Gn=new Q({props:{code:`from transformers import FlaxBertModel # load model model = FlaxBertModel.from_pretrained("bert-base-cased") # By default, the model params will be in fp32, to cast these to float16 model.params = model.to_fp16(model.params) # If you want don't want to cast certain parameters (for example layer norm bias and scale) # then pass the mask as follows from flax import traverse_util model = FlaxBertModel.from_pretrained("bert-base-cased") flat_params = traverse_util.flatten_dict(model.params) mask = { path: (path[-2] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) for path in flat_params } mask = traverse_util.unflatten_dict(mask) model.params = model.to_fp16(model.params, mask)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># By default, the model params will be in fp32, to cast these to float16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If you want don&#x27;t want to cast certain parameters (for example layer norm bias and scale)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># then pass the mask as follows</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> flax <span class="hljs-keyword">import</span> traverse_util <span class="hljs-meta">&gt;&gt;&gt; </span>model = 
FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>flat_params = traverse_util.flatten_dict(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span>mask = { <span class="hljs-meta">... </span> path: (path[-<span class="hljs-number">2</span>] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;bias&quot;</span>) <span class="hljs-keyword">and</span> path[-<span class="hljs-number">2</span>:] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;scale&quot;</span>)) <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> path <span class="hljs-keyword">in</span> flat_params <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>mask = traverse_util.unflatten_dict(mask) <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp16(model.params, mask)`}}),Rn=new v({props:{name:"to_fp32",anchor:"transformers.FlaxPreTrainedModel.to_fp32",parameters:[{name:"params",val:": typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]"},{name:"mask",val:": typing.Any = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L230",parametersDescription:[{anchor:"transformers.FlaxPreTrainedModel.to_fp32.params",description:`<strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.`,name:"params"},{anchor:"transformers.FlaxPreTrainedModel.to_fp32.mask",description:`<strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. 
The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip`,name:"mask"}]}}),Yn=new Q({props:{code:`from transformers import FlaxBertModel # Download model and configuration from huggingface.co model = FlaxBertModel.from_pretrained("bert-base-cased") # By default, the model params will be in fp32, to illustrate the use of this method, # we'll first cast to fp16 and back to fp32 model.params = model.to_f16(model.params) # now cast back to fp32 model.params = model.to_fp32(model.params)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># By default, the model params will be in fp32, to illustrate the use of this method,</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># we&#x27;ll first cast to fp16 and back to fp32</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_f16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># now cast back to fp32</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp32(model.params)`}}),Kn=new ao({}),Jn=new v({props:{name:"class transformers.file_utils.PushToHubMixin",anchor:"transformers.file_utils.PushToHubMixin",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2837"}}),Qn=new v({props:{name:"push_to_hub",anchor:"transformers.file_utils.PushToHubMixin.push_to_hub",parameters:[{name:"repo_path_or_name",val:": typing.Optional[str] = 
None"},{name:"repo_url",val:": typing.Optional[str] = None"},{name:"use_temp_dir",val:": bool = False"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"organization",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"**model_card_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842",parametersDescription:[{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name",description:`<strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your {object} in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.`,name:"repo_path_or_name"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_url",description:`<strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.`,name:"repo_url"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. 
This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.`,name:"use_temp_dir"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;add {object}&quot;</code>.`,name:"commit_message"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.organization",description:`<strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your {object} (you must be a member of this organization).`,name:"organization"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"}],returnDescription:` <p>The url of the commit of your {object} in the given repository.</p> `,returnType:` <p><code>str</code></p> `}}),er=new Q({props:{code:`from transformers import {object_class} {object} = {object_class}.from_pretrained("bert-base-cased") # Push the {object} to your namespace with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. {object}.push_to_hub("my-finetuned-bert") # Push the {object} to your namespace with the name "my-finetuned-bert" with no local clone. 
{object}.push_to_hub("my-finetuned-bert", use_temp_dir=True) # Push the {object} to an organization with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. {object}.push_to_hub("my-finetuned-bert", organization="huggingface") # Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*. {object}.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> {object_class} {<span class="hljs-built_in">object</span>} = {object_class}.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the {object} to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the {object} to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the {object} to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span 
class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)`}}),{c(){w=o("meta"),k=l(),x=o("h1"),E=o("a"),W=o("span"),c(A.$$.fragment),L=l(),de=o("span"),Jd=s("Models"),ul=l(),Z=o("p"),Qd=s("The base classes "),sr=o("a"),Zd=s("PreTrainedModel"),em=s(", "),ir=o("a"),tm=s("TFPreTrainedModel"),om=s(`, and `),lr=o("a"),nm=s("FlaxPreTrainedModel"),rm=s(` implement the common methods for loading/saving a model either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository).`),gl=l(),Le=o("p"),dr=o("a"),am=s("PreTrainedModel"),sm=s(" and "),mr=o("a"),im=s("TFPreTrainedModel"),lm=s(` also implement a few methods which are common among all the models to:`),_l=l(),Ze=o("ul"),Qr=o("li"),dm=s("resize the input token embeddings when new tokens are added to the vocabulary"),mm=l(),Zr=o("li"),cm=s("prune the attention heads of the model."),bl=l(),z=o("p"),pm=s("The other methods that are common to each model are defined in "),cr=o("a"),hm=s("ModuleUtilsMixin"),fm=s(` (for the PyTorch models) and `),ea=o("code"),um=s("TFModuleUtilsMixin"),gm=s(` (for the TensorFlow models) or for text generation, `),pr=o("a"),_m=s("GenerationMixin"),bm=s(` (for the PyTorch models), `),hr=o("a"),vm=s("TFGenerationMixin"),ym=s(` (for the TensorFlow models) and `),fr=o("a"),$m=s("FlaxGenerationMixin"),wm=s(" (for the Flax/JAX models)."),vl=l(),ze=o("h2"),et=o("a"),ta=o("span"),c(so.$$.fragment),Tm=l(),oa=o("span"),xm=s("PreTrainedModel"),yl=l(),T=o("div"),c(io.$$.fragment),Pm=l(),na=o("p"),Em=s("Base class for all models."),Mm=l(),ur=o("p"),gr=o("a"),km=s("PreTrainedModel"),jm=s(` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to:`),Dm=l(),lo=o("ul"),ra=o("li"),Fm=s("resize the input 
embeddings,"),qm=l(),aa=o("li"),Am=s("prune heads in the self-attention heads."),Cm=l(),sa=o("p"),Im=s("Class attributes (overridden by derived classes):"),Lm=l(),G=o("ul"),ia=o("li"),ge=o("p"),la=o("strong"),zm=s("config_class"),Om=s(" ("),_r=o("a"),Um=s("PretrainedConfig"),Nm=s(") \u2014 A subclass of "),br=o("a"),Hm=s("PretrainedConfig"),Xm=s(` to use as configuration class for this model architecture.`),Bm=l(),mo=o("li"),_e=o("p"),da=o("strong"),Vm=s("load_tf_weights"),Sm=s(" ("),ma=o("code"),Wm=s("Callable"),Gm=s(") \u2014 A python "),ca=o("em"),Rm=s("method"),Ym=s(` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:`),Km=l(),Oe=o("ul"),tt=o("li"),pa=o("strong"),Jm=s("model"),Qm=s(" ("),vr=o("a"),Zm=s("PreTrainedModel"),ec=s(") \u2014 An instance of the model on which to load the TensorFlow checkpoint."),tc=l(),ot=o("li"),ha=o("strong"),oc=s("config"),nc=s(" ("),fa=o("code"),rc=s("PreTrainedConfig"),ac=s(") \u2014 An instance of the configuration associated to the model."),sc=l(),nt=o("li"),ua=o("strong"),ic=s("path"),lc=s(" ("),ga=o("code"),dc=s("str"),mc=s(") \u2014 A path to the TensorFlow checkpoint."),cc=l(),_a=o("li"),rt=o("p"),ba=o("strong"),pc=s("base_model_prefix"),hc=s(" ("),va=o("code"),fc=s("str"),uc=s(`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),gc=l(),ya=o("li"),at=o("p"),$a=o("strong"),_c=s("is_parallelizable"),bc=s(" ("),wa=o("code"),vc=s("bool"),yc=s(") \u2014 A flag indicating whether this model supports model parallelization."),$c=l(),Ta=o("li"),U=o("p"),xa=o("strong"),wc=s("main_input_name"),Tc=s(" ("),Pa=o("code"),xc=s("str"),Pc=s(") \u2014 The name of the principal input to the model (often "),Ea=o("code"),Ec=s("input_ids"),Mc=s(` for NLP models, `),Ma=o("code"),kc=s("pixel_values"),jc=s(" for vision models and "),ka=o("code"),Dc=s("input_values"),Fc=s(" for speech 
models)."),qc=l(),ee=o("div"),c(co.$$.fragment),Ac=l(),po=o("p"),Cc=s(`Upload the model checkpoint to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),ja=o("code"),Ic=s("repo_path_or_name"),Lc=s("."),zc=l(),Da=o("p"),Oc=s("Examples:"),Uc=l(),c(ho.$$.fragment),Nc=l(),F=o("div"),c(fo.$$.fragment),Hc=l(),Fa=o("p"),Xc=s("Instantiate a pretrained pytorch model from a pre-trained model configuration."),Bc=l(),Ue=o("p"),Vc=s("The model is set in evaluation mode by default using "),qa=o("code"),Sc=s("model.eval()"),Wc=s(` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with `),Aa=o("code"),Gc=s("model.train()"),Rc=s("."),Yc=l(),uo=o("p"),Kc=s("The warning "),Ca=o("em"),Jc=s("Weights from XXX not initialized from pretrained model"),Qc=s(` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.`),Zc=l(),go=o("p"),ep=s("The warning "),Ia=o("em"),tp=s("Weights from XXX not used in YYY"),op=s(` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),np=l(),c(st.$$.fragment),rp=l(),c(it.$$.fragment),ap=l(),La=o("p"),sp=s("Examples:"),ip=l(),c(_o.$$.fragment),lp=l(),lt=o("div"),c(bo.$$.fragment),dp=l(),za=o("p"),mp=s("Returns the model\u2019s input embeddings."),cp=l(),dt=o("div"),c(vo.$$.fragment),pp=l(),Oa=o("p"),hp=s("Returns the model\u2019s output embeddings."),fp=l(),be=o("div"),c(yo.$$.fragment),up=l(),Ua=o("p"),gp=s("Deactivates gradient checkpointing for the current model."),_p=l(),Na=o("p"),bp=s(`Note that in other frameworks this feature can be referred to as \u201Cactivation checkpointing\u201D or \u201Ccheckpoint activations\u201D.`),vp=l(),ve=o("div"),c($o.$$.fragment),yp=l(),Ha=o("p"),$p=s("Activates gradient checkpointing for the current model."),wp=l(),Xa=o("p"),Tp=s(`Note that in other frameworks this feature can be referred to as \u201Cactivation 
checkpointing\u201D or \u201Ccheckpoint activations\u201D.`),xp=l(),mt=o("div"),c(wo.$$.fragment),Pp=l(),Ba=o("p"),Ep=s("If needed prunes and maybe initializes weights."),Mp=l(),ct=o("div"),c(To.$$.fragment),kp=l(),Va=o("p"),jp=s(`A method executed at the end of each Transformer model initialization, to execute code that needs the model\u2019s modules properly initialized (such as weight initialization).`),Dp=l(),pt=o("div"),c(xo.$$.fragment),Fp=l(),Sa=o("p"),qp=s("Prunes heads of the base model."),Ap=l(),ye=o("div"),c(Po.$$.fragment),Cp=l(),Wa=o("p"),Ip=s(`Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class.`),Lp=l(),c(ht.$$.fragment),zp=l(),$e=o("div"),c(Eo.$$.fragment),Op=l(),Mo=o("p"),Up=s("Resizes input token embeddings matrix of the model if "),Ga=o("code"),Np=s("new_num_tokens != config.vocab_size"),Hp=s("."),Xp=l(),ko=o("p"),Bp=s("Takes care of tying weights embeddings afterwards if the model class has a "),Ra=o("code"),Vp=s("tie_weights()"),Sp=s(" method."),Wp=l(),ft=o("div"),c(jo.$$.fragment),Gp=l(),Do=o("p"),Rp=s(`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),Ya=o("code"),Yp=s("[from_pretrained()](/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained)"),Kp=s(" class method."),Jp=l(),ut=o("div"),c(Fo.$$.fragment),Qp=l(),Ka=o("p"),Zp=s("Set model\u2019s input embeddings."),eh=l(),we=o("div"),c(qo.$$.fragment),th=l(),Ja=o("p"),oh=s("Tie the weights between the input embeddings and the output embeddings."),nh=l(),Ao=o("p"),rh=s("If the "),Qa=o("code"),ah=s("torchscript"),sh=s(` flag is set in the configuration, can\u2019t handle parameter sharing so we are cloning the weights instead.`),$l=l(),yr=o("a"),wl=l(),Ne=o("h3"),gt=o("a"),Za=o("span"),c(Co.$$.fragment),ih=l(),es=o("span"),lh=s("Model Instantiation dtype"),Tl=l(),te=o("p"),dh=s("Under Pytorch a model normally 
gets instantiated with "),ts=o("code"),mh=s("torch.float32"),ch=s(` format. This can be an issue if one tries to load a model whose weights are in fp16, since it\u2019d require twice as much memory. To overcome this limitation, you can either explicitly pass the desired `),os=o("code"),ph=s("dtype"),hh=s(" using "),ns=o("code"),fh=s("torch_dtype"),uh=s(" argument:"),xl=l(),c(Io.$$.fragment),Pl=l(),Te=o("p"),gh=s("or, if you want the model to always load in the most optimal memory pattern, you can use the special value "),rs=o("code"),_h=s('"auto"'),bh=s(`, and then `),as=o("code"),vh=s("dtype"),yh=s(" will be automatically derived from the model\u2019s weights:"),El=l(),c(Lo.$$.fragment),Ml=l(),_t=o("p"),$h=s("Models instantiated from scratch can also be told which "),ss=o("code"),wh=s("dtype"),Th=s(" to use with:"),kl=l(),c(zo.$$.fragment),jl=l(),$r=o("p"),xh=s("Due to Pytorch design, this functionality is only available for floating dtypes."),Dl=l(),He=o("h2"),bt=o("a"),is=o("span"),c(Oo.$$.fragment),Ph=l(),ls=o("span"),Eh=s("ModuleUtilsMixin"),Fl=l(),j=o("div"),c(Uo.$$.fragment),Mh=l(),No=o("p"),kh=s("A few utilities for "),ds=o("code"),jh=s("torch.nn.Modules"),Dh=s(", to be used as a mixin."),Fh=l(),xe=o("div"),c(Ho.$$.fragment),qh=l(),ms=o("p"),Ah=s("Add a memory hook before and after each sub-module forward pass to record increase in memory consumption."),Ch=l(),Xe=o("p"),Ih=s("Increase in memory consumption is stored in a "),cs=o("code"),Lh=s("mem_rss_diff"),zh=s(` attribute for each module and can be reset to zero with `),ps=o("code"),Oh=s("model.reset_memory_hooks_state()"),Uh=s("."),Nh=l(),vt=o("div"),c(Xo.$$.fragment),Hh=l(),hs=o("p"),Xh=s("Helper function to estimate the total number of tokens from the model inputs."),Bh=l(),yt=o("div"),c(Bo.$$.fragment),Vh=l(),Be=o("p"),Sh=s(`Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a batch with this transformer model. 
Default approximation neglects the quadratic dependency on the number of tokens (valid if `),fs=o("code"),Wh=s("12 * d_model << sequence_length"),Gh=s(") as laid out in "),Vo=o("a"),Rh=s(`this paper`),Yh=s(` section 2.1. Should be overridden for transformers with parameter re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.`),Kh=l(),$t=o("div"),c(So.$$.fragment),Jh=l(),us=o("p"),Qh=s("Makes broadcastable attention and causal masks so that future and masked tokens are ignored."),Zh=l(),wt=o("div"),c(Wo.$$.fragment),ef=l(),gs=o("p"),tf=s("Prepare the head mask if needed."),of=l(),Tt=o("div"),c(Go.$$.fragment),nf=l(),_s=o("p"),rf=s("Invert an attention mask (e.g., switches 0. and 1.)."),af=l(),xt=o("div"),c(Ro.$$.fragment),sf=l(),bs=o("p"),lf=s("Get number of (optionally, trainable or non-embeddings) parameters in the module."),df=l(),Pt=o("div"),c(Yo.$$.fragment),mf=l(),Ve=o("p"),cf=s("Reset the "),vs=o("code"),pf=s("mem_rss_diff"),hf=s(" attribute of each module (see "),wr=o("a"),ff=s("add_memory_hooks()"),uf=s(")."),ql=l(),Se=o("h2"),Et=o("a"),ys=o("span"),c(Ko.$$.fragment),gf=l(),$s=o("span"),_f=s("TFPreTrainedModel"),Al=l(),b=o("div"),c(Jo.$$.fragment),bf=l(),ws=o("p"),vf=s("Base class for all TF models."),yf=l(),Tr=o("p"),xr=o("a"),$f=s("TFPreTrainedModel"),wf=s(` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to:`),Tf=l(),Qo=o("ul"),Ts=o("li"),xf=s("resize the input embeddings,"),Pf=l(),xs=o("li"),Ef=s("prune heads in the self-attention heads."),Mf=l(),Ps=o("p"),kf=s("Class attributes (overridden by derived classes):"),jf=l(),We=o("ul"),Pe=o("li"),Es=o("strong"),Df=s("config_class"),Ff=s(" ("),Pr=o("a"),qf=s("PretrainedConfig"),Af=s(") \u2014 A subclass of "),Er=o("a"),Cf=s("PretrainedConfig"),If=s(` to use as configuration class for this model 
architecture.`),Lf=l(),Mt=o("li"),Ms=o("strong"),zf=s("base_model_prefix"),Of=s(" ("),ks=o("code"),Uf=s("str"),Nf=s(`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),Hf=l(),N=o("li"),js=o("strong"),Xf=s("main_input_name"),Bf=s(" ("),Ds=o("code"),Vf=s("str"),Sf=s(") \u2014 The name of the principal input to the model (often "),Fs=o("code"),Wf=s("input_ids"),Gf=s(` for NLP models, `),qs=o("code"),Rf=s("pixel_values"),Yf=s(" for vision models and "),As=o("code"),Kf=s("input_values"),Jf=s(" for speech models)."),Qf=l(),oe=o("div"),c(Zo.$$.fragment),Zf=l(),en=o("p"),eu=s(`Upload the model checkpoint to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Cs=o("code"),tu=s("repo_path_or_name"),ou=s("."),nu=l(),Is=o("p"),ru=s("Examples:"),au=l(),c(tn.$$.fragment),su=l(),kt=o("div"),c(on.$$.fragment),iu=l(),Ls=o("p"),lu=s(`This is a thin wrapper that sets the model\u2019s loss output head as the loss if the user does not specify a loss function themselves.`),du=l(),C=o("div"),c(nn.$$.fragment),mu=l(),zs=o("p"),cu=s("Instantiate a pretrained TF 2.0 model from a pre-trained model configuration."),pu=l(),rn=o("p"),hu=s("The warning "),Os=o("em"),fu=s("Weights from XXX not initialized from pretrained model"),uu=s(` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.`),gu=l(),an=o("p"),_u=s("The warning "),Us=o("em"),bu=s("Weights from XXX not used in YYY"),vu=s(` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),yu=l(),c(jt.$$.fragment),$u=l(),Ns=o("p"),wu=s("Examples:"),Tu=l(),c(sn.$$.fragment),xu=l(),Dt=o("div"),c(ln.$$.fragment),Pu=l(),Hs=o("p"),Eu=s("Dict of bias attached to an LM head. 
The key represents the name of the bias attribute."),Mu=l(),Ft=o("div"),c(dn.$$.fragment),ku=l(),Xs=o("p"),ju=s("Returns the model\u2019s input embeddings layer."),Du=l(),qt=o("div"),c(mn.$$.fragment),Fu=l(),Bs=o("p"),qu=s("The LM Head layer. This method must be overwritten by all the models that have a lm head."),Au=l(),At=o("div"),c(cn.$$.fragment),Cu=l(),Vs=o("p"),Iu=s("Returns the model\u2019s output embeddings"),Lu=l(),Ct=o("div"),c(pn.$$.fragment),zu=l(),Ss=o("p"),Ou=s(`Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings`),Uu=l(),It=o("div"),c(hn.$$.fragment),Nu=l(),Ws=o("p"),Hu=s("Get the concatenated _prefix name of the bias from the model name to the parent layer"),Xu=l(),Lt=o("div"),c(fn.$$.fragment),Bu=l(),Gs=o("p"),Vu=s(`Loads a saved checkpoint (model weights and optimizer state) from a repo. Returns the current epoch count when the checkpoint was made.`),Su=l(),zt=o("div"),c(un.$$.fragment),Wu=l(),Rs=o("p"),Gu=s("Prunes heads of the base model."),Ru=l(),Ee=o("div"),c(gn.$$.fragment),Yu=l(),_n=o("p"),Ku=s("Resizes input token embeddings matrix of the model if "),Ys=o("code"),Ju=s("new_num_tokens != config.vocab_size"),Qu=s("."),Zu=l(),bn=o("p"),eg=s("Takes care of tying weights embeddings afterwards if the model class has a "),Ks=o("code"),tg=s("tie_weights()"),og=s(" method."),ng=l(),Ot=o("div"),c(vn.$$.fragment),rg=l(),yn=o("p"),ag=s(`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),Mr=o("a"),sg=s("from_pretrained()"),ig=s(" class method."),lg=l(),Ut=o("div"),c($n.$$.fragment),dg=l(),Js=o("p"),mg=s("Method used for serving the model."),cg=l(),Nt=o("div"),c(wn.$$.fragment),pg=l(),Qs=o("p"),hg=s("Prepare the output of the saved model. 
Each model must implement this function."),fg=l(),Ht=o("div"),c(Tn.$$.fragment),ug=l(),Zs=o("p"),gg=s("Set all the bias in the LM head."),_g=l(),Xt=o("div"),c(xn.$$.fragment),bg=l(),ei=o("p"),vg=s("Set model\u2019s input embeddings"),yg=l(),Bt=o("div"),c(Pn.$$.fragment),$g=l(),ti=o("p"),wg=s("Set model\u2019s output embeddings"),Tg=l(),Vt=o("div"),c(En.$$.fragment),xg=l(),oi=o("p"),Pg=s("A modification of Keras\u2019s default test_step that cleans up the printed metrics when we use a dummy loss."),Eg=l(),ne=o("div"),c(Mn.$$.fragment),Mg=l(),R=o("p"),kg=s("A modification of Keras\u2019s default "),ni=o("code"),jg=s("train_step"),Dg=s(` that cleans up the printed metrics when we use a dummy loss. If a user specifies a loss at model compile time, this function behaves as the original Keras `),ri=o("code"),Fg=s("train_step"),qg=s(`. In this case, it expects the same `),ai=o("code"),Ag=s("data"),Cg=s(" as the original function (i.e. "),si=o("code"),Ig=s("(inputs, labels)"),Lg=s(")."),zg=l(),Ge=o("p"),Og=s(`However, when the model is compiled without specifying the loss AND the expected label columns are passed as part of the input dictionary, the loss is computed internally (inside the model class) and is used in the backwards pass. 
In this case, `),ii=o("code"),Ug=s("data"),Ng=s(" is a singleton tuple containing "),li=o("code"),Hg=s("(inputs,)"),Xg=s("."),Bg=l(),Re=o("p"),Vg=s(`This is possible under the aforementioned circumstances because our overriden compile function can set an additional loss function that reduces a `),di=o("code"),Sg=s("loss"),Wg=s(" output, and the model will output a "),mi=o("code"),Gg=s("loss"),Rg=s(` component (notice the name matching) containing the loss that was used to train the pre-trained model.`),Cl=l(),Ye=o("h2"),St=o("a"),ci=o("span"),c(kn.$$.fragment),Yg=l(),pi=o("span"),Kg=s("TFModelUtilsMixin"),Il=l(),me=o("div"),c(jn.$$.fragment),Jg=l(),Dn=o("p"),Qg=s("A few utilities for "),hi=o("code"),Zg=s("tf.keras.Model"),e_=s(", to be used as a mixin."),t_=l(),Wt=o("div"),c(Fn.$$.fragment),o_=l(),fi=o("p"),n_=s("Get the number of (optionally, trainable) parameters in the model."),Ll=l(),Ke=o("h2"),Gt=o("a"),ui=o("span"),c(qn.$$.fragment),r_=l(),gi=o("span"),a_=s("FlaxPreTrainedModel"),zl=l(),M=o("div"),c(An.$$.fragment),s_=l(),_i=o("p"),i_=s("Base class for all models."),l_=l(),kr=o("p"),jr=o("a"),d_=s("FlaxPreTrainedModel"),m_=s(` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models.`),c_=l(),bi=o("p"),p_=s("Class attributes (overridden by derived classes):"),h_=l(),Je=o("ul"),Me=o("li"),vi=o("strong"),f_=s("config_class"),u_=s(" ("),Dr=o("a"),g_=s("PretrainedConfig"),__=s(") \u2014 A subclass of "),Fr=o("a"),b_=s("PretrainedConfig"),v_=s(` to use as configuration class for this model architecture.`),y_=l(),Rt=o("li"),yi=o("strong"),$_=s("base_model_prefix"),w_=s(" ("),$i=o("code"),T_=s("str"),x_=s(`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),P_=l(),H=o("li"),wi=o("strong"),E_=s("main_input_name"),M_=s(" ("),Ti=o("code"),k_=s("str"),j_=s(") \u2014 The name of the principal input to the 
model (often "),xi=o("code"),D_=s("input_ids"),F_=s(` for NLP models, `),Pi=o("code"),q_=s("pixel_values"),A_=s(" for vision models and "),Ei=o("code"),C_=s("input_values"),I_=s(" for speech models)."),L_=l(),re=o("div"),c(Cn.$$.fragment),z_=l(),In=o("p"),O_=s(`Upload the model checkpoint to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Mi=o("code"),U_=s("repo_path_or_name"),N_=s("."),H_=l(),ki=o("p"),X_=s("Examples:"),B_=l(),c(Ln.$$.fragment),V_=l(),O=o("div"),c(zn.$$.fragment),S_=l(),ji=o("p"),W_=s("Instantiate a pretrained flax model from a pre-trained model configuration."),G_=l(),On=o("p"),R_=s("The warning "),Di=o("em"),Y_=s("Weights from XXX not initialized from pretrained model"),K_=s(` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.`),J_=l(),Un=o("p"),Q_=s("The warning "),Fi=o("em"),Z_=s("Weights from XXX not used in YYY"),eb=s(` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),tb=l(),qi=o("p"),ob=s("Examples:"),nb=l(),c(Nn.$$.fragment),rb=l(),ke=o("div"),c(Hn.$$.fragment),ab=l(),Ai=o("p"),sb=s(`Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class.`),ib=l(),c(Yt.$$.fragment),lb=l(),Kt=o("div"),c(Xn.$$.fragment),db=l(),Bn=o("p"),mb=s(`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),Ci=o("code"),cb=s("[from_pretrained()](/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained)"),pb=s(" class method"),hb=l(),X=o("div"),c(Vn.$$.fragment),fb=l(),Y=o("p"),ub=s("Cast the floating-point "),Ii=o("code"),gb=s("params"),_b=s(" to "),Li=o("code"),bb=s("jax.numpy.bfloat16"),vb=s(". 
This returns a new "),zi=o("code"),yb=s("params"),$b=s(` tree and does not cast the `),Oi=o("code"),wb=s("params"),Tb=s(" in place."),xb=l(),Ui=o("p"),Pb=s(`This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.`),Eb=l(),Ni=o("p"),Mb=s("Examples:"),kb=l(),c(Sn.$$.fragment),jb=l(),B=o("div"),c(Wn.$$.fragment),Db=l(),K=o("p"),Fb=s("Cast the floating-point "),Hi=o("code"),qb=s("parmas"),Ab=s(" to "),Xi=o("code"),Cb=s("jax.numpy.float16"),Ib=s(". This returns a new "),Bi=o("code"),Lb=s("params"),zb=s(` tree and does not cast the `),Vi=o("code"),Ob=s("params"),Ub=s(" in place."),Nb=l(),Si=o("p"),Hb=s(`This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full half-precision training or to save weights in float16 for inference in order to save memory and improve speed.`),Xb=l(),Wi=o("p"),Bb=s("Examples:"),Vb=l(),c(Gn.$$.fragment),Sb=l(),ae=o("div"),c(Rn.$$.fragment),Wb=l(),J=o("p"),Gb=s("Cast the floating-point "),Gi=o("code"),Rb=s("parmas"),Yb=s(" to "),Ri=o("code"),Kb=s("jax.numpy.float32"),Jb=s(`. This method can be used to explicitly convert the model parameters to fp32 precision. 
This returns a new `),Yi=o("code"),Qb=s("params"),Zb=s(" tree and does not cast the "),Ki=o("code"),ev=s("params"),tv=s(" in place."),ov=l(),Ji=o("p"),nv=s("Examples:"),rv=l(),c(Yn.$$.fragment),Ol=l(),Qe=o("h2"),Jt=o("a"),Qi=o("span"),c(Kn.$$.fragment),av=l(),Zi=o("span"),sv=s("Pushing to the Hub"),Ul=l(),ce=o("div"),c(Jn.$$.fragment),iv=l(),el=o("p"),lv=s("A Mixin containing the functionality to push a model or tokenizer to the hub."),dv=l(),se=o("div"),c(Qn.$$.fragment),mv=l(),Zn=o("p"),cv=s(`Upload the {object_files} to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),tl=o("code"),pv=s("repo_path_or_name"),hv=s("."),fv=l(),ol=o("p"),uv=s("Examples:"),gv=l(),c(er.$$.fragment),this.h()},l(a){const _=rw('[data-svelte="svelte-1phssyn"]',document.head);w=n(_,"META",{name:!0,content:!0}),_.forEach(t),k=d(a),x=n(a,"H1",{class:!0});var tr=r(x);E=n(tr,"A",{id:!0,class:!0,href:!0});var nl=r(E);W=n(nl,"SPAN",{});var rl=r(W);p(A.$$.fragment,rl),rl.forEach(t),nl.forEach(t),L=d(tr),de=n(tr,"SPAN",{});var al=r(de);Jd=i(al,"Models"),al.forEach(t),tr.forEach(t),ul=d(a),Z=n(a,"P",{});var pe=r(Z);Qd=i(pe,"The base classes "),sr=n(pe,"A",{href:!0});var yv=r(sr);Zd=i(yv,"PreTrainedModel"),yv.forEach(t),em=i(pe,", "),ir=n(pe,"A",{href:!0});var $v=r(ir);tm=i($v,"TFPreTrainedModel"),$v.forEach(t),om=i(pe,`, and `),lr=n(pe,"A",{href:!0});var wv=r(lr);nm=i(wv,"FlaxPreTrainedModel"),wv.forEach(t),rm=i(pe,` implement the common methods for loading/saving a model either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace\u2019s AWS S3 repository).`),pe.forEach(t),gl=d(a),Le=n(a,"P",{});var sl=r(Le);dr=n(sl,"A",{href:!0});var Tv=r(dr);am=i(Tv,"PreTrainedModel"),Tv.forEach(t),sm=i(sl," and "),mr=n(sl,"A",{href:!0});var xv=r(mr);im=i(xv,"TFPreTrainedModel"),xv.forEach(t),lm=i(sl,` also implement a few methods which are common among all the models to:`),sl.forEach(t),_l=d(a),Ze=n(a,"UL",{});var 
Hl=r(Ze);Qr=n(Hl,"LI",{});var Pv=r(Qr);dm=i(Pv,"resize the input token embeddings when new tokens are added to the vocabulary"),Pv.forEach(t),mm=d(Hl),Zr=n(Hl,"LI",{});var Ev=r(Zr);cm=i(Ev,"prune the attention heads of the model."),Ev.forEach(t),Hl.forEach(t),bl=d(a),z=n(a,"P",{});var ie=r(z);pm=i(ie,"The other methods that are common to each model are defined in "),cr=n(ie,"A",{href:!0});var Mv=r(cr);hm=i(Mv,"ModuleUtilsMixin"),Mv.forEach(t),fm=i(ie,` (for the PyTorch models) and `),ea=n(ie,"CODE",{});var kv=r(ea);um=i(kv,"TFModuleUtilsMixin"),kv.forEach(t),gm=i(ie,` (for the TensorFlow models) or for text generation, `),pr=n(ie,"A",{href:!0});var jv=r(pr);_m=i(jv,"GenerationMixin"),jv.forEach(t),bm=i(ie,` (for the PyTorch models), `),hr=n(ie,"A",{href:!0});var Dv=r(hr);vm=i(Dv,"TFGenerationMixin"),Dv.forEach(t),ym=i(ie,` (for the TensorFlow models) and `),fr=n(ie,"A",{href:!0});var Fv=r(fr);$m=i(Fv,"FlaxGenerationMixin"),Fv.forEach(t),wm=i(ie," (for the Flax/JAX models)."),ie.forEach(t),vl=d(a),ze=n(a,"H2",{class:!0});var Xl=r(ze);et=n(Xl,"A",{id:!0,class:!0,href:!0});var qv=r(et);ta=n(qv,"SPAN",{});var Av=r(ta);p(so.$$.fragment,Av),Av.forEach(t),qv.forEach(t),Tm=d(Xl),oa=n(Xl,"SPAN",{});var Cv=r(oa);xm=i(Cv,"PreTrainedModel"),Cv.forEach(t),Xl.forEach(t),yl=d(a),T=n(a,"DIV",{class:!0});var P=r(T);p(io.$$.fragment,P),Pm=d(P),na=n(P,"P",{});var Iv=r(na);Em=i(Iv,"Base class for all models."),Iv.forEach(t),Mm=d(P),ur=n(P,"P",{});var _v=r(ur);gr=n(_v,"A",{href:!0});var Lv=r(gr);km=i(Lv,"PreTrainedModel"),Lv.forEach(t),jm=i(_v,` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to:`),_v.forEach(t),Dm=d(P),lo=n(P,"UL",{});var Bl=r(lo);ra=n(Bl,"LI",{});var zv=r(ra);Fm=i(zv,"resize the input embeddings,"),zv.forEach(t),qm=d(Bl),aa=n(Bl,"LI",{});var Ov=r(aa);Am=i(Ov,"prune heads in the self-attention 
heads."),Ov.forEach(t),Bl.forEach(t),Cm=d(P),sa=n(P,"P",{});var Uv=r(sa);Im=i(Uv,"Class attributes (overridden by derived classes):"),Uv.forEach(t),Lm=d(P),G=n(P,"UL",{});var je=r(G);ia=n(je,"LI",{});var Nv=r(ia);ge=n(Nv,"P",{});var or=r(ge);la=n(or,"STRONG",{});var Hv=r(la);zm=i(Hv,"config_class"),Hv.forEach(t),Om=i(or," ("),_r=n(or,"A",{href:!0});var Xv=r(_r);Um=i(Xv,"PretrainedConfig"),Xv.forEach(t),Nm=i(or,") \u2014 A subclass of "),br=n(or,"A",{href:!0});var Bv=r(br);Hm=i(Bv,"PretrainedConfig"),Bv.forEach(t),Xm=i(or,` to use as configuration class for this model architecture.`),or.forEach(t),Nv.forEach(t),Bm=d(je),mo=n(je,"LI",{});var Vl=r(mo);_e=n(Vl,"P",{});var nr=r(_e);da=n(nr,"STRONG",{});var Vv=r(da);Vm=i(Vv,"load_tf_weights"),Vv.forEach(t),Sm=i(nr," ("),ma=n(nr,"CODE",{});var Sv=r(ma);Wm=i(Sv,"Callable"),Sv.forEach(t),Gm=i(nr,") \u2014 A python "),ca=n(nr,"EM",{});var Wv=r(ca);Rm=i(Wv,"method"),Wv.forEach(t),Ym=i(nr,` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:`),nr.forEach(t),Km=d(Vl),Oe=n(Vl,"UL",{});var qr=r(Oe);tt=n(qr,"LI",{});var il=r(tt);pa=n(il,"STRONG",{});var Gv=r(pa);Jm=i(Gv,"model"),Gv.forEach(t),Qm=i(il," ("),vr=n(il,"A",{href:!0});var Rv=r(vr);Zm=i(Rv,"PreTrainedModel"),Rv.forEach(t),ec=i(il,") \u2014 An instance of the model on which to load the TensorFlow checkpoint."),il.forEach(t),tc=d(qr),ot=n(qr,"LI",{});var ll=r(ot);ha=n(ll,"STRONG",{});var Yv=r(ha);oc=i(Yv,"config"),Yv.forEach(t),nc=i(ll," ("),fa=n(ll,"CODE",{});var Kv=r(fa);rc=i(Kv,"PreTrainedConfig"),Kv.forEach(t),ac=i(ll,") \u2014 An instance of the configuration associated to the model."),ll.forEach(t),sc=d(qr),nt=n(qr,"LI",{});var dl=r(nt);ua=n(dl,"STRONG",{});var Jv=r(ua);ic=i(Jv,"path"),Jv.forEach(t),lc=i(dl," ("),ga=n(dl,"CODE",{});var Qv=r(ga);dc=i(Qv,"str"),Qv.forEach(t),mc=i(dl,") \u2014 A path to the TensorFlow checkpoint."),dl.forEach(t),qr.forEach(t),Vl.forEach(t),cc=d(je),_a=n(je,"LI",{});var Zv=r(_a);rt=n(Zv,"P",{});var 
ml=r(rt);ba=n(ml,"STRONG",{});var e1=r(ba);pc=i(e1,"base_model_prefix"),e1.forEach(t),hc=i(ml," ("),va=n(ml,"CODE",{});var t1=r(va);fc=i(t1,"str"),t1.forEach(t),uc=i(ml,`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),ml.forEach(t),Zv.forEach(t),gc=d(je),ya=n(je,"LI",{});var o1=r(ya);at=n(o1,"P",{});var cl=r(at);$a=n(cl,"STRONG",{});var n1=r($a);_c=i(n1,"is_parallelizable"),n1.forEach(t),bc=i(cl," ("),wa=n(cl,"CODE",{});var r1=r(wa);vc=i(r1,"bool"),r1.forEach(t),yc=i(cl,") \u2014 A flag indicating whether this model supports model parallelization."),cl.forEach(t),o1.forEach(t),$c=d(je),Ta=n(je,"LI",{});var a1=r(Ta);U=n(a1,"P",{});var he=r(U);xa=n(he,"STRONG",{});var s1=r(xa);wc=i(s1,"main_input_name"),s1.forEach(t),Tc=i(he," ("),Pa=n(he,"CODE",{});var i1=r(Pa);xc=i(i1,"str"),i1.forEach(t),Pc=i(he,") \u2014 The name of the principal input to the model (often "),Ea=n(he,"CODE",{});var l1=r(Ea);Ec=i(l1,"input_ids"),l1.forEach(t),Mc=i(he,` for NLP models, `),Ma=n(he,"CODE",{});var d1=r(Ma);kc=i(d1,"pixel_values"),d1.forEach(t),jc=i(he," for vision models and "),ka=n(he,"CODE",{});var m1=r(ka);Dc=i(m1,"input_values"),m1.forEach(t),Fc=i(he," for speech models)."),he.forEach(t),a1.forEach(t),je.forEach(t),qc=d(P),ee=n(P,"DIV",{class:!0});var Qt=r(ee);p(co.$$.fragment,Qt),Ac=d(Qt),po=n(Qt,"P",{});var Sl=r(po);Cc=i(Sl,`Upload the model checkpoint to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),ja=n(Sl,"CODE",{});var c1=r(ja);Ic=i(c1,"repo_path_or_name"),c1.forEach(t),Lc=i(Sl,"."),Sl.forEach(t),zc=d(Qt),Da=n(Qt,"P",{});var p1=r(Da);Oc=i(p1,"Examples:"),p1.forEach(t),Uc=d(Qt),p(ho.$$.fragment,Qt),Qt.forEach(t),Nc=d(P),F=n(P,"DIV",{class:!0});var I=r(F);p(fo.$$.fragment,I),Hc=d(I),Fa=n(I,"P",{});var h1=r(Fa);Xc=i(h1,"Instantiate a pretrained pytorch model from a pre-trained model configuration."),h1.forEach(t),Bc=d(I),Ue=n(I,"P",{});var 
Ar=r(Ue);Vc=i(Ar,"The model is set in evaluation mode by default using "),qa=n(Ar,"CODE",{});var f1=r(qa);Sc=i(f1,"model.eval()"),f1.forEach(t),Wc=i(Ar,` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with `),Aa=n(Ar,"CODE",{});var u1=r(Aa);Gc=i(u1,"model.train()"),u1.forEach(t),Rc=i(Ar,"."),Ar.forEach(t),Yc=d(I),uo=n(I,"P",{});var Wl=r(uo);Kc=i(Wl,"The warning "),Ca=n(Wl,"EM",{});var g1=r(Ca);Jc=i(g1,"Weights from XXX not initialized from pretrained model"),g1.forEach(t),Qc=i(Wl,` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.`),Wl.forEach(t),Zc=d(I),go=n(I,"P",{});var Gl=r(go);ep=i(Gl,"The warning "),Ia=n(Gl,"EM",{});var _1=r(Ia);tp=i(_1,"Weights from XXX not used in YYY"),_1.forEach(t),op=i(Gl,` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),Gl.forEach(t),np=d(I),p(st.$$.fragment,I),rp=d(I),p(it.$$.fragment,I),ap=d(I),La=n(I,"P",{});var b1=r(La);sp=i(b1,"Examples:"),b1.forEach(t),ip=d(I),p(_o.$$.fragment,I),I.forEach(t),lp=d(P),lt=n(P,"DIV",{class:!0});var Rl=r(lt);p(bo.$$.fragment,Rl),dp=d(Rl),za=n(Rl,"P",{});var v1=r(za);mp=i(v1,"Returns the model\u2019s input embeddings."),v1.forEach(t),Rl.forEach(t),cp=d(P),dt=n(P,"DIV",{class:!0});var Yl=r(dt);p(vo.$$.fragment,Yl),pp=d(Yl),Oa=n(Yl,"P",{});var y1=r(Oa);hp=i(y1,"Returns the model\u2019s output embeddings."),y1.forEach(t),Yl.forEach(t),fp=d(P),be=n(P,"DIV",{class:!0});var Cr=r(be);p(yo.$$.fragment,Cr),up=d(Cr),Ua=n(Cr,"P",{});var $1=r(Ua);gp=i($1,"Deactivates gradient checkpointing for the current model."),$1.forEach(t),_p=d(Cr),Na=n(Cr,"P",{});var w1=r(Na);bp=i(w1,`Note that in other frameworks this feature can be referred to as \u201Cactivation checkpointing\u201D or \u201Ccheckpoint activations\u201D.`),w1.forEach(t),Cr.forEach(t),vp=d(P),ve=n(P,"DIV",{class:!0});var 
Ir=r(ve);p($o.$$.fragment,Ir),yp=d(Ir),Ha=n(Ir,"P",{});var T1=r(Ha);$p=i(T1,"Activates gradient checkpointing for the current model."),T1.forEach(t),wp=d(Ir),Xa=n(Ir,"P",{});var x1=r(Xa);Tp=i(x1,`Note that in other frameworks this feature can be referred to as \u201Cactivation checkpointing\u201D or \u201Ccheckpoint activations\u201D.`),x1.forEach(t),Ir.forEach(t),xp=d(P),mt=n(P,"DIV",{class:!0});var Kl=r(mt);p(wo.$$.fragment,Kl),Pp=d(Kl),Ba=n(Kl,"P",{});var P1=r(Ba);Ep=i(P1,"If needed prunes and maybe initializes weights."),P1.forEach(t),Kl.forEach(t),Mp=d(P),ct=n(P,"DIV",{class:!0});var Jl=r(ct);p(To.$$.fragment,Jl),kp=d(Jl),Va=n(Jl,"P",{});var E1=r(Va);jp=i(E1,`A method executed at the end of each Transformer model initialization, to execute code that needs the model\u2019s modules properly initialized (such as weight initialization).`),E1.forEach(t),Jl.forEach(t),Dp=d(P),pt=n(P,"DIV",{class:!0});var Ql=r(pt);p(xo.$$.fragment,Ql),Fp=d(Ql),Sa=n(Ql,"P",{});var M1=r(Sa);qp=i(M1,"Prunes heads of the base model."),M1.forEach(t),Ql.forEach(t),Ap=d(P),ye=n(P,"DIV",{class:!0});var Lr=r(ye);p(Po.$$.fragment,Lr),Cp=d(Lr),Wa=n(Lr,"P",{});var k1=r(Wa);Ip=i(k1,`Register this class with a given auto class. 
This should only be used for custom models as the ones in the library are already mapped with an auto class.`),k1.forEach(t),Lp=d(Lr),p(ht.$$.fragment,Lr),Lr.forEach(t),zp=d(P),$e=n(P,"DIV",{class:!0});var zr=r($e);p(Eo.$$.fragment,zr),Op=d(zr),Mo=n(zr,"P",{});var Zl=r(Mo);Up=i(Zl,"Resizes input token embeddings matrix of the model if "),Ga=n(Zl,"CODE",{});var j1=r(Ga);Np=i(j1,"new_num_tokens != config.vocab_size"),j1.forEach(t),Hp=i(Zl,"."),Zl.forEach(t),Xp=d(zr),ko=n(zr,"P",{});var ed=r(ko);Bp=i(ed,"Takes care of tying weights embeddings afterwards if the model class has a "),Ra=n(ed,"CODE",{});var D1=r(Ra);Vp=i(D1,"tie_weights()"),D1.forEach(t),Sp=i(ed," method."),ed.forEach(t),zr.forEach(t),Wp=d(P),ft=n(P,"DIV",{class:!0});var td=r(ft);p(jo.$$.fragment,td),Gp=d(td),Do=n(td,"P",{});var od=r(Do);Rp=i(od,`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),Ya=n(od,"CODE",{});var F1=r(Ya);Yp=i(F1,"[from_pretrained()](/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained)"),F1.forEach(t),Kp=i(od," class method."),od.forEach(t),td.forEach(t),Jp=d(P),ut=n(P,"DIV",{class:!0});var nd=r(ut);p(Fo.$$.fragment,nd),Qp=d(nd),Ka=n(nd,"P",{});var q1=r(Ka);Zp=i(q1,"Set model\u2019s input embeddings."),q1.forEach(t),nd.forEach(t),eh=d(P),we=n(P,"DIV",{class:!0});var Or=r(we);p(qo.$$.fragment,Or),th=d(Or),Ja=n(Or,"P",{});var A1=r(Ja);oh=i(A1,"Tie the weights between the input embeddings and the output embeddings."),A1.forEach(t),nh=d(Or),Ao=n(Or,"P",{});var rd=r(Ao);rh=i(rd,"If the "),Qa=n(rd,"CODE",{});var C1=r(Qa);ah=i(C1,"torchscript"),C1.forEach(t),sh=i(rd,` flag is set in the configuration, can\u2019t handle parameter sharing so we are cloning the weights instead.`),rd.forEach(t),Or.forEach(t),P.forEach(t),$l=d(a),yr=n(a,"A",{id:!0}),r(yr).forEach(t),wl=d(a),Ne=n(a,"H3",{class:!0});var ad=r(Ne);gt=n(ad,"A",{id:!0,class:!0,href:!0});var I1=r(gt);Za=n(I1,"SPAN",{});var 
L1=r(Za);p(Co.$$.fragment,L1),L1.forEach(t),I1.forEach(t),ih=d(ad),es=n(ad,"SPAN",{});var z1=r(es);lh=i(z1,"Model Instantiation dtype"),z1.forEach(t),ad.forEach(t),Tl=d(a),te=n(a,"P",{});var Zt=r(te);dh=i(Zt,"Under Pytorch a model normally gets instantiated with "),ts=n(Zt,"CODE",{});var O1=r(ts);mh=i(O1,"torch.float32"),O1.forEach(t),ch=i(Zt,` format. This can be an issue if one tries to load a model whose weights are in fp16, since it\u2019d require twice as much memory. To overcome this limitation, you can either explicitly pass the desired `),os=n(Zt,"CODE",{});var U1=r(os);ph=i(U1,"dtype"),U1.forEach(t),hh=i(Zt," using "),ns=n(Zt,"CODE",{});var N1=r(ns);fh=i(N1,"torch_dtype"),N1.forEach(t),uh=i(Zt," argument:"),Zt.forEach(t),xl=d(a),p(Io.$$.fragment,a),Pl=d(a),Te=n(a,"P",{});var Ur=r(Te);gh=i(Ur,"or, if you want the model to always load in the most optimal memory pattern, you can use the special value "),rs=n(Ur,"CODE",{});var H1=r(rs);_h=i(H1,'"auto"'),H1.forEach(t),bh=i(Ur,`, and then `),as=n(Ur,"CODE",{});var X1=r(as);vh=i(X1,"dtype"),X1.forEach(t),yh=i(Ur," will be automatically derived from the model\u2019s weights:"),Ur.forEach(t),El=d(a),p(Lo.$$.fragment,a),Ml=d(a),_t=n(a,"P",{});var sd=r(_t);$h=i(sd,"Models instantiated from scratch can also be told which "),ss=n(sd,"CODE",{});var B1=r(ss);wh=i(B1,"dtype"),B1.forEach(t),Th=i(sd," to use with:"),sd.forEach(t),kl=d(a),p(zo.$$.fragment,a),jl=d(a),$r=n(a,"P",{});var V1=r($r);xh=i(V1,"Due to Pytorch design, this functionality is only available for floating dtypes."),V1.forEach(t),Dl=d(a),He=n(a,"H2",{class:!0});var id=r(He);bt=n(id,"A",{id:!0,class:!0,href:!0});var S1=r(bt);is=n(S1,"SPAN",{});var W1=r(is);p(Oo.$$.fragment,W1),W1.forEach(t),S1.forEach(t),Ph=d(id),ls=n(id,"SPAN",{});var G1=r(ls);Eh=i(G1,"ModuleUtilsMixin"),G1.forEach(t),id.forEach(t),Fl=d(a),j=n(a,"DIV",{class:!0});var q=r(j);p(Uo.$$.fragment,q),Mh=d(q),No=n(q,"P",{});var ld=r(No);kh=i(ld,"A few utilities for "),ds=n(ld,"CODE",{});var 
R1=r(ds);jh=i(R1,"torch.nn.Modules"),R1.forEach(t),Dh=i(ld,", to be used as a mixin."),ld.forEach(t),Fh=d(q),xe=n(q,"DIV",{class:!0});var Nr=r(xe);p(Ho.$$.fragment,Nr),qh=d(Nr),ms=n(Nr,"P",{});var Y1=r(ms);Ah=i(Y1,"Add a memory hook before and after each sub-module forward pass to record increase in memory consumption."),Y1.forEach(t),Ch=d(Nr),Xe=n(Nr,"P",{});var Hr=r(Xe);Ih=i(Hr,"Increase in memory consumption is stored in a "),cs=n(Hr,"CODE",{});var K1=r(cs);Lh=i(K1,"mem_rss_diff"),K1.forEach(t),zh=i(Hr,` attribute for each module and can be reset to zero with `),ps=n(Hr,"CODE",{});var J1=r(ps);Oh=i(J1,"model.reset_memory_hooks_state()"),J1.forEach(t),Uh=i(Hr,"."),Hr.forEach(t),Nr.forEach(t),Nh=d(q),vt=n(q,"DIV",{class:!0});var dd=r(vt);p(Xo.$$.fragment,dd),Hh=d(dd),hs=n(dd,"P",{});var Q1=r(hs);Xh=i(Q1,"Helper function to estimate the total number of tokens from the model inputs."),Q1.forEach(t),dd.forEach(t),Bh=d(q),yt=n(q,"DIV",{class:!0});var md=r(yt);p(Bo.$$.fragment,md),Vh=d(md),Be=n(md,"P",{});var Xr=r(Be);Sh=i(Xr,`Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a batch with this transformer model. Default approximation neglects the quadratic dependency on the number of tokens (valid if `),fs=n(Xr,"CODE",{});var Z1=r(fs);Wh=i(Z1,"12 * d_model << sequence_length"),Z1.forEach(t),Gh=i(Xr,") as laid out in "),Vo=n(Xr,"A",{href:!0,rel:!0});var ey=r(Vo);Rh=i(ey,`this paper`),ey.forEach(t),Yh=i(Xr,` section 2.1. Should be overridden for transformers with parameter re-use e.g. 
Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.`),Xr.forEach(t),md.forEach(t),Kh=d(q),$t=n(q,"DIV",{class:!0});var cd=r($t);p(So.$$.fragment,cd),Jh=d(cd),us=n(cd,"P",{});var ty=r(us);Qh=i(ty,"Makes broadcastable attention and causal masks so that future and masked tokens are ignored."),ty.forEach(t),cd.forEach(t),Zh=d(q),wt=n(q,"DIV",{class:!0});var pd=r(wt);p(Wo.$$.fragment,pd),ef=d(pd),gs=n(pd,"P",{});var oy=r(gs);tf=i(oy,"Prepare the head mask if needed."),oy.forEach(t),pd.forEach(t),of=d(q),Tt=n(q,"DIV",{class:!0});var hd=r(Tt);p(Go.$$.fragment,hd),nf=d(hd),_s=n(hd,"P",{});var ny=r(_s);rf=i(ny,"Invert an attention mask (e.g., switches 0. and 1.)."),ny.forEach(t),hd.forEach(t),af=d(q),xt=n(q,"DIV",{class:!0});var fd=r(xt);p(Ro.$$.fragment,fd),sf=d(fd),bs=n(fd,"P",{});var ry=r(bs);lf=i(ry,"Get number of (optionally, trainable or non-embeddings) parameters in the module."),ry.forEach(t),fd.forEach(t),df=d(q),Pt=n(q,"DIV",{class:!0});var ud=r(Pt);p(Yo.$$.fragment,ud),mf=d(ud),Ve=n(ud,"P",{});var Br=r(Ve);cf=i(Br,"Reset the "),vs=n(Br,"CODE",{});var ay=r(vs);pf=i(ay,"mem_rss_diff"),ay.forEach(t),hf=i(Br," attribute of each module (see "),wr=n(Br,"A",{href:!0});var sy=r(wr);ff=i(sy,"add_memory_hooks()"),sy.forEach(t),uf=i(Br,")."),Br.forEach(t),ud.forEach(t),q.forEach(t),ql=d(a),Se=n(a,"H2",{class:!0});var gd=r(Se);Et=n(gd,"A",{id:!0,class:!0,href:!0});var iy=r(Et);ys=n(iy,"SPAN",{});var ly=r(ys);p(Ko.$$.fragment,ly),ly.forEach(t),iy.forEach(t),gf=d(gd),$s=n(gd,"SPAN",{});var dy=r($s);_f=i(dy,"TFPreTrainedModel"),dy.forEach(t),gd.forEach(t),Al=d(a),b=n(a,"DIV",{class:!0});var $=r(b);p(Jo.$$.fragment,$),bf=d($),ws=n($,"P",{});var my=r(ws);vf=i(my,"Base class for all TF models."),my.forEach(t),yf=d($),Tr=n($,"P",{});var bv=r(Tr);xr=n(bv,"A",{href:!0});var cy=r(xr);$f=i(cy,"TFPreTrainedModel"),cy.forEach(t),wf=i(bv,` takes care of storing the configuration of the models and handles methods for loading, downloading and 
saving models as well as a few methods common to all models to:`),bv.forEach(t),Tf=d($),Qo=n($,"UL",{});var _d=r(Qo);Ts=n(_d,"LI",{});var py=r(Ts);xf=i(py,"resize the input embeddings,"),py.forEach(t),Pf=d(_d),xs=n(_d,"LI",{});var hy=r(xs);Ef=i(hy,"prune heads in the self-attention heads."),hy.forEach(t),_d.forEach(t),Mf=d($),Ps=n($,"P",{});var fy=r(Ps);kf=i(fy,"Class attributes (overridden by derived classes):"),fy.forEach(t),jf=d($),We=n($,"UL",{});var Vr=r(We);Pe=n(Vr,"LI",{});var rr=r(Pe);Es=n(rr,"STRONG",{});var uy=r(Es);Df=i(uy,"config_class"),uy.forEach(t),Ff=i(rr," ("),Pr=n(rr,"A",{href:!0});var gy=r(Pr);qf=i(gy,"PretrainedConfig"),gy.forEach(t),Af=i(rr,") \u2014 A subclass of "),Er=n(rr,"A",{href:!0});var _y=r(Er);Cf=i(_y,"PretrainedConfig"),_y.forEach(t),If=i(rr,` to use as configuration class for this model architecture.`),rr.forEach(t),Lf=d(Vr),Mt=n(Vr,"LI",{});var pl=r(Mt);Ms=n(pl,"STRONG",{});var by=r(Ms);zf=i(by,"base_model_prefix"),by.forEach(t),Of=i(pl," ("),ks=n(pl,"CODE",{});var vy=r(ks);Uf=i(vy,"str"),vy.forEach(t),Nf=i(pl,`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),pl.forEach(t),Hf=d(Vr),N=n(Vr,"LI",{});var fe=r(N);js=n(fe,"STRONG",{});var yy=r(js);Xf=i(yy,"main_input_name"),yy.forEach(t),Bf=i(fe," ("),Ds=n(fe,"CODE",{});var $y=r(Ds);Vf=i($y,"str"),$y.forEach(t),Sf=i(fe,") \u2014 The name of the principal input to the model (often "),Fs=n(fe,"CODE",{});var wy=r(Fs);Wf=i(wy,"input_ids"),wy.forEach(t),Gf=i(fe,` for NLP models, `),qs=n(fe,"CODE",{});var Ty=r(qs);Rf=i(Ty,"pixel_values"),Ty.forEach(t),Yf=i(fe," for vision models and "),As=n(fe,"CODE",{});var xy=r(As);Kf=i(xy,"input_values"),xy.forEach(t),Jf=i(fe," for speech models)."),fe.forEach(t),Vr.forEach(t),Qf=d($),oe=n($,"DIV",{class:!0});var eo=r(oe);p(Zo.$$.fragment,eo),Zf=d(eo),en=n(eo,"P",{});var bd=r(en);eu=i(bd,`Upload the model checkpoint to the \u{1F917} Model Hub while 
synchronizing a local clone of the repo in `),Cs=n(bd,"CODE",{});var Py=r(Cs);tu=i(Py,"repo_path_or_name"),Py.forEach(t),ou=i(bd,"."),bd.forEach(t),nu=d(eo),Is=n(eo,"P",{});var Ey=r(Is);ru=i(Ey,"Examples:"),Ey.forEach(t),au=d(eo),p(tn.$$.fragment,eo),eo.forEach(t),su=d($),kt=n($,"DIV",{class:!0});var vd=r(kt);p(on.$$.fragment,vd),iu=d(vd),Ls=n(vd,"P",{});var My=r(Ls);lu=i(My,`This is a thin wrapper that sets the model\u2019s loss output head as the loss if the user does not specify a loss function themselves.`),My.forEach(t),vd.forEach(t),du=d($),C=n($,"DIV",{class:!0});var V=r(C);p(nn.$$.fragment,V),mu=d(V),zs=n(V,"P",{});var ky=r(zs);cu=i(ky,"Instantiate a pretrained TF 2.0 model from a pre-trained model configuration."),ky.forEach(t),pu=d(V),rn=n(V,"P",{});var yd=r(rn);hu=i(yd,"The warning "),Os=n(yd,"EM",{});var jy=r(Os);fu=i(jy,"Weights from XXX not initialized from pretrained model"),jy.forEach(t),uu=i(yd,` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.`),yd.forEach(t),gu=d(V),an=n(V,"P",{});var $d=r(an);_u=i($d,"The warning "),Us=n($d,"EM",{});var Dy=r(Us);bu=i(Dy,"Weights from XXX not used in YYY"),Dy.forEach(t),vu=i($d,` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),$d.forEach(t),yu=d(V),p(jt.$$.fragment,V),$u=d(V),Ns=n(V,"P",{});var Fy=r(Ns);wu=i(Fy,"Examples:"),Fy.forEach(t),Tu=d(V),p(sn.$$.fragment,V),V.forEach(t),xu=d($),Dt=n($,"DIV",{class:!0});var wd=r(Dt);p(ln.$$.fragment,wd),Pu=d(wd),Hs=n(wd,"P",{});var qy=r(Hs);Eu=i(qy,"Dict of bias attached to an LM head. 
The key represents the name of the bias attribute."),qy.forEach(t),wd.forEach(t),Mu=d($),Ft=n($,"DIV",{class:!0});var Td=r(Ft);p(dn.$$.fragment,Td),ku=d(Td),Xs=n(Td,"P",{});var Ay=r(Xs);ju=i(Ay,"Returns the model\u2019s input embeddings layer."),Ay.forEach(t),Td.forEach(t),Du=d($),qt=n($,"DIV",{class:!0});var xd=r(qt);p(mn.$$.fragment,xd),Fu=d(xd),Bs=n(xd,"P",{});var Cy=r(Bs);qu=i(Cy,"The LM Head layer. This method must be overwritten by all the models that have a lm head."),Cy.forEach(t),xd.forEach(t),Au=d($),At=n($,"DIV",{class:!0});var Pd=r(At);p(cn.$$.fragment,Pd),Cu=d(Pd),Vs=n(Pd,"P",{});var Iy=r(Vs);Iu=i(Iy,"Returns the model\u2019s output embeddings"),Iy.forEach(t),Pd.forEach(t),Lu=d($),Ct=n($,"DIV",{class:!0});var Ed=r(Ct);p(pn.$$.fragment,Ed),zu=d(Ed),Ss=n(Ed,"P",{});var Ly=r(Ss);Ou=i(Ly,`Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings`),Ly.forEach(t),Ed.forEach(t),Uu=d($),It=n($,"DIV",{class:!0});var Md=r(It);p(hn.$$.fragment,Md),Nu=d(Md),Ws=n(Md,"P",{});var zy=r(Ws);Hu=i(zy,"Get the concatenated _prefix name of the bias from the model name to the parent layer"),zy.forEach(t),Md.forEach(t),Xu=d($),Lt=n($,"DIV",{class:!0});var kd=r(Lt);p(fn.$$.fragment,kd),Bu=d(kd),Gs=n(kd,"P",{});var Oy=r(Gs);Vu=i(Oy,`Loads a saved checkpoint (model weights and optimizer state) from a repo. 
Returns the current epoch count when the checkpoint was made.`),Oy.forEach(t),kd.forEach(t),Su=d($),zt=n($,"DIV",{class:!0});var jd=r(zt);p(un.$$.fragment,jd),Wu=d(jd),Rs=n(jd,"P",{});var Uy=r(Rs);Gu=i(Uy,"Prunes heads of the base model."),Uy.forEach(t),jd.forEach(t),Ru=d($),Ee=n($,"DIV",{class:!0});var Sr=r(Ee);p(gn.$$.fragment,Sr),Yu=d(Sr),_n=n(Sr,"P",{});var Dd=r(_n);Ku=i(Dd,"Resizes input token embeddings matrix of the model if "),Ys=n(Dd,"CODE",{});var Ny=r(Ys);Ju=i(Ny,"new_num_tokens != config.vocab_size"),Ny.forEach(t),Qu=i(Dd,"."),Dd.forEach(t),Zu=d(Sr),bn=n(Sr,"P",{});var Fd=r(bn);eg=i(Fd,"Takes care of tying weights embeddings afterwards if the model class has a "),Ks=n(Fd,"CODE",{});var Hy=r(Ks);tg=i(Hy,"tie_weights()"),Hy.forEach(t),og=i(Fd," method."),Fd.forEach(t),Sr.forEach(t),ng=d($),Ot=n($,"DIV",{class:!0});var qd=r(Ot);p(vn.$$.fragment,qd),rg=d(qd),yn=n(qd,"P",{});var Ad=r(yn);ag=i(Ad,`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),Mr=n(Ad,"A",{href:!0});var Xy=r(Mr);sg=i(Xy,"from_pretrained()"),Xy.forEach(t),ig=i(Ad," class method."),Ad.forEach(t),qd.forEach(t),lg=d($),Ut=n($,"DIV",{class:!0});var Cd=r(Ut);p($n.$$.fragment,Cd),dg=d(Cd),Js=n(Cd,"P",{});var By=r(Js);mg=i(By,"Method used for serving the model."),By.forEach(t),Cd.forEach(t),cg=d($),Nt=n($,"DIV",{class:!0});var Id=r(Nt);p(wn.$$.fragment,Id),pg=d(Id),Qs=n(Id,"P",{});var Vy=r(Qs);hg=i(Vy,"Prepare the output of the saved model. 
Each model must implement this function."),Vy.forEach(t),Id.forEach(t),fg=d($),Ht=n($,"DIV",{class:!0});var Ld=r(Ht);p(Tn.$$.fragment,Ld),ug=d(Ld),Zs=n(Ld,"P",{});var Sy=r(Zs);gg=i(Sy,"Set all the bias in the LM head."),Sy.forEach(t),Ld.forEach(t),_g=d($),Xt=n($,"DIV",{class:!0});var zd=r(Xt);p(xn.$$.fragment,zd),bg=d(zd),ei=n(zd,"P",{});var Wy=r(ei);vg=i(Wy,"Set model\u2019s input embeddings"),Wy.forEach(t),zd.forEach(t),yg=d($),Bt=n($,"DIV",{class:!0});var Od=r(Bt);p(Pn.$$.fragment,Od),$g=d(Od),ti=n(Od,"P",{});var Gy=r(ti);wg=i(Gy,"Set model\u2019s output embeddings"),Gy.forEach(t),Od.forEach(t),Tg=d($),Vt=n($,"DIV",{class:!0});var Ud=r(Vt);p(En.$$.fragment,Ud),xg=d(Ud),oi=n(Ud,"P",{});var Ry=r(oi);Pg=i(Ry,"A modification of Keras\u2019s default test_step that cleans up the printed metrics when we use a dummy loss."),Ry.forEach(t),Ud.forEach(t),Eg=d($),ne=n($,"DIV",{class:!0});var to=r(ne);p(Mn.$$.fragment,to),Mg=d(to),R=n(to,"P",{});var De=r(R);kg=i(De,"A modification of Keras\u2019s default "),ni=n(De,"CODE",{});var Yy=r(ni);jg=i(Yy,"train_step"),Yy.forEach(t),Dg=i(De,` that cleans up the printed metrics when we use a dummy loss. If a user specifies a loss at model compile time, this function behaves as the original Keras `),ri=n(De,"CODE",{});var Ky=r(ri);Fg=i(Ky,"train_step"),Ky.forEach(t),qg=i(De,`. In this case, it expects the same `),ai=n(De,"CODE",{});var Jy=r(ai);Ag=i(Jy,"data"),Jy.forEach(t),Cg=i(De," as the original function (i.e. "),si=n(De,"CODE",{});var Qy=r(si);Ig=i(Qy,"(inputs, labels)"),Qy.forEach(t),Lg=i(De,")."),De.forEach(t),zg=d(to),Ge=n(to,"P",{});var Wr=r(Ge);Og=i(Wr,`However, when the model is compiled without specifying the loss AND the expected label columns are passed as part of the input dictionary, the loss is computed internally (inside the model class) and is used in the backwards pass. 
In this case, `),ii=n(Wr,"CODE",{});var Zy=r(ii);Ug=i(Zy,"data"),Zy.forEach(t),Ng=i(Wr," is a singleton tuple containing "),li=n(Wr,"CODE",{});var e$=r(li);Hg=i(e$,"(inputs,)"),e$.forEach(t),Xg=i(Wr,"."),Wr.forEach(t),Bg=d(to),Re=n(to,"P",{});var Gr=r(Re);Vg=i(Gr,`This is possible under the aforementioned circumstances because our overriden compile function can set an additional loss function that reduces a `),di=n(Gr,"CODE",{});var t$=r(di);Sg=i(t$,"loss"),t$.forEach(t),Wg=i(Gr," output, and the model will output a "),mi=n(Gr,"CODE",{});var o$=r(mi);Gg=i(o$,"loss"),o$.forEach(t),Rg=i(Gr,` component (notice the name matching) containing the loss that was used to train the pre-trained model.`),Gr.forEach(t),to.forEach(t),$.forEach(t),Cl=d(a),Ye=n(a,"H2",{class:!0});var Nd=r(Ye);St=n(Nd,"A",{id:!0,class:!0,href:!0});var n$=r(St);ci=n(n$,"SPAN",{});var r$=r(ci);p(kn.$$.fragment,r$),r$.forEach(t),n$.forEach(t),Yg=d(Nd),pi=n(Nd,"SPAN",{});var a$=r(pi);Kg=i(a$,"TFModelUtilsMixin"),a$.forEach(t),Nd.forEach(t),Il=d(a),me=n(a,"DIV",{class:!0});var Rr=r(me);p(jn.$$.fragment,Rr),Jg=d(Rr),Dn=n(Rr,"P",{});var Hd=r(Dn);Qg=i(Hd,"A few utilities for "),hi=n(Hd,"CODE",{});var s$=r(hi);Zg=i(s$,"tf.keras.Model"),s$.forEach(t),e_=i(Hd,", to be used as a mixin."),Hd.forEach(t),t_=d(Rr),Wt=n(Rr,"DIV",{class:!0});var Xd=r(Wt);p(Fn.$$.fragment,Xd),o_=d(Xd),fi=n(Xd,"P",{});var i$=r(fi);n_=i(i$,"Get the number of (optionally, trainable) parameters in the model."),i$.forEach(t),Xd.forEach(t),Rr.forEach(t),Ll=d(a),Ke=n(a,"H2",{class:!0});var Bd=r(Ke);Gt=n(Bd,"A",{id:!0,class:!0,href:!0});var l$=r(Gt);ui=n(l$,"SPAN",{});var d$=r(ui);p(qn.$$.fragment,d$),d$.forEach(t),l$.forEach(t),r_=d(Bd),gi=n(Bd,"SPAN",{});var m$=r(gi);a_=i(m$,"FlaxPreTrainedModel"),m$.forEach(t),Bd.forEach(t),zl=d(a),M=n(a,"DIV",{class:!0});var D=r(M);p(An.$$.fragment,D),s_=d(D),_i=n(D,"P",{});var c$=r(_i);i_=i(c$,"Base class for all models."),c$.forEach(t),l_=d(D),kr=n(D,"P",{});var vv=r(kr);jr=n(vv,"A",{href:!0});var 
p$=r(jr);d_=i(p$,"FlaxPreTrainedModel"),p$.forEach(t),m_=i(vv,` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models.`),vv.forEach(t),c_=d(D),bi=n(D,"P",{});var h$=r(bi);p_=i(h$,"Class attributes (overridden by derived classes):"),h$.forEach(t),h_=d(D),Je=n(D,"UL",{});var Yr=r(Je);Me=n(Yr,"LI",{});var ar=r(Me);vi=n(ar,"STRONG",{});var f$=r(vi);f_=i(f$,"config_class"),f$.forEach(t),u_=i(ar," ("),Dr=n(ar,"A",{href:!0});var u$=r(Dr);g_=i(u$,"PretrainedConfig"),u$.forEach(t),__=i(ar,") \u2014 A subclass of "),Fr=n(ar,"A",{href:!0});var g$=r(Fr);b_=i(g$,"PretrainedConfig"),g$.forEach(t),v_=i(ar,` to use as configuration class for this model architecture.`),ar.forEach(t),y_=d(Yr),Rt=n(Yr,"LI",{});var hl=r(Rt);yi=n(hl,"STRONG",{});var _$=r(yi);$_=i(_$,"base_model_prefix"),_$.forEach(t),w_=i(hl," ("),$i=n(hl,"CODE",{});var b$=r($i);T_=i(b$,"str"),b$.forEach(t),x_=i(hl,`) \u2014 A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.`),hl.forEach(t),P_=d(Yr),H=n(Yr,"LI",{});var ue=r(H);wi=n(ue,"STRONG",{});var v$=r(wi);E_=i(v$,"main_input_name"),v$.forEach(t),M_=i(ue," ("),Ti=n(ue,"CODE",{});var y$=r(Ti);k_=i(y$,"str"),y$.forEach(t),j_=i(ue,") \u2014 The name of the principal input to the model (often "),xi=n(ue,"CODE",{});var $$=r(xi);D_=i($$,"input_ids"),$$.forEach(t),F_=i(ue,` for NLP models, `),Pi=n(ue,"CODE",{});var w$=r(Pi);q_=i(w$,"pixel_values"),w$.forEach(t),A_=i(ue," for vision models and "),Ei=n(ue,"CODE",{});var T$=r(Ei);C_=i(T$,"input_values"),T$.forEach(t),I_=i(ue," for speech models)."),ue.forEach(t),Yr.forEach(t),L_=d(D),re=n(D,"DIV",{class:!0});var oo=r(re);p(Cn.$$.fragment,oo),z_=d(oo),In=n(oo,"P",{});var Vd=r(In);O_=i(Vd,`Upload the model checkpoint to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Mi=n(Vd,"CODE",{});var 
x$=r(Mi);U_=i(x$,"repo_path_or_name"),x$.forEach(t),N_=i(Vd,"."),Vd.forEach(t),H_=d(oo),ki=n(oo,"P",{});var P$=r(ki);X_=i(P$,"Examples:"),P$.forEach(t),B_=d(oo),p(Ln.$$.fragment,oo),oo.forEach(t),V_=d(D),O=n(D,"DIV",{class:!0});var le=r(O);p(zn.$$.fragment,le),S_=d(le),ji=n(le,"P",{});var E$=r(ji);W_=i(E$,"Instantiate a pretrained flax model from a pre-trained model configuration."),E$.forEach(t),G_=d(le),On=n(le,"P",{});var Sd=r(On);R_=i(Sd,"The warning "),Di=n(Sd,"EM",{});var M$=r(Di);Y_=i(M$,"Weights from XXX not initialized from pretrained model"),M$.forEach(t),K_=i(Sd,` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.`),Sd.forEach(t),J_=d(le),Un=n(le,"P",{});var Wd=r(Un);Q_=i(Wd,"The warning "),Fi=n(Wd,"EM",{});var k$=r(Fi);Z_=i(k$,"Weights from XXX not used in YYY"),k$.forEach(t),eb=i(Wd,` means that the layer XXX is not used by YYY, therefore those weights are discarded.`),Wd.forEach(t),tb=d(le),qi=n(le,"P",{});var j$=r(qi);ob=i(j$,"Examples:"),j$.forEach(t),nb=d(le),p(Nn.$$.fragment,le),le.forEach(t),rb=d(D),ke=n(D,"DIV",{class:!0});var Kr=r(ke);p(Hn.$$.fragment,Kr),ab=d(Kr),Ai=n(Kr,"P",{});var D$=r(Ai);sb=i(D$,`Register this class with a given auto class. 
This should only be used for custom models as the ones in the library are already mapped with an auto class.`),D$.forEach(t),ib=d(Kr),p(Yt.$$.fragment,Kr),Kr.forEach(t),lb=d(D),Kt=n(D,"DIV",{class:!0});var Gd=r(Kt);p(Xn.$$.fragment,Gd),db=d(Gd),Bn=n(Gd,"P",{});var Rd=r(Bn);mb=i(Rd,`Save a model and its configuration file to a directory, so that it can be re-loaded using the `),Ci=n(Rd,"CODE",{});var F$=r(Ci);cb=i(F$,"[from_pretrained()](/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained)"),F$.forEach(t),pb=i(Rd," class method"),Rd.forEach(t),Gd.forEach(t),hb=d(D),X=n(D,"DIV",{class:!0});var Fe=r(X);p(Vn.$$.fragment,Fe),fb=d(Fe),Y=n(Fe,"P",{});var qe=r(Y);ub=i(qe,"Cast the floating-point "),Ii=n(qe,"CODE",{});var q$=r(Ii);gb=i(q$,"params"),q$.forEach(t),_b=i(qe," to "),Li=n(qe,"CODE",{});var A$=r(Li);bb=i(A$,"jax.numpy.bfloat16"),A$.forEach(t),vb=i(qe,". This returns a new "),zi=n(qe,"CODE",{});var C$=r(zi);yb=i(C$,"params"),C$.forEach(t),$b=i(qe,` tree and does not cast the `),Oi=n(qe,"CODE",{});var I$=r(Oi);wb=i(I$,"params"),I$.forEach(t),Tb=i(qe," in place."),qe.forEach(t),xb=d(Fe),Ui=n(Fe,"P",{});var L$=r(Ui);Pb=i(L$,`This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.`),L$.forEach(t),Eb=d(Fe),Ni=n(Fe,"P",{});var z$=r(Ni);Mb=i(z$,"Examples:"),z$.forEach(t),kb=d(Fe),p(Sn.$$.fragment,Fe),Fe.forEach(t),jb=d(D),B=n(D,"DIV",{class:!0});var Ae=r(B);p(Wn.$$.fragment,Ae),Db=d(Ae),K=n(Ae,"P",{});var Ce=r(K);Fb=i(Ce,"Cast the floating-point "),Hi=n(Ce,"CODE",{});var O$=r(Hi);qb=i(O$,"parmas"),O$.forEach(t),Ab=i(Ce," to "),Xi=n(Ce,"CODE",{});var U$=r(Xi);Cb=i(U$,"jax.numpy.float16"),U$.forEach(t),Ib=i(Ce,". 
This returns a new "),Bi=n(Ce,"CODE",{});var N$=r(Bi);Lb=i(N$,"params"),N$.forEach(t),zb=i(Ce,` tree and does not cast the `),Vi=n(Ce,"CODE",{});var H$=r(Vi);Ob=i(H$,"params"),H$.forEach(t),Ub=i(Ce," in place."),Ce.forEach(t),Nb=d(Ae),Si=n(Ae,"P",{});var X$=r(Si);Hb=i(X$,`This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full half-precision training or to save weights in float16 for inference in order to save memory and improve speed.`),X$.forEach(t),Xb=d(Ae),Wi=n(Ae,"P",{});var B$=r(Wi);Bb=i(B$,"Examples:"),B$.forEach(t),Vb=d(Ae),p(Gn.$$.fragment,Ae),Ae.forEach(t),Sb=d(D),ae=n(D,"DIV",{class:!0});var no=r(ae);p(Rn.$$.fragment,no),Wb=d(no),J=n(no,"P",{});var Ie=r(J);Gb=i(Ie,"Cast the floating-point "),Gi=n(Ie,"CODE",{});var V$=r(Gi);Rb=i(V$,"parmas"),V$.forEach(t),Yb=i(Ie," to "),Ri=n(Ie,"CODE",{});var S$=r(Ri);Kb=i(S$,"jax.numpy.float32"),S$.forEach(t),Jb=i(Ie,`. This method can be used to explicitly convert the model parameters to fp32 precision. 
This returns a new `),Yi=n(Ie,"CODE",{});var W$=r(Yi);Qb=i(W$,"params"),W$.forEach(t),Zb=i(Ie," tree and does not cast the "),Ki=n(Ie,"CODE",{});var G$=r(Ki);ev=i(G$,"params"),G$.forEach(t),tv=i(Ie," in place."),Ie.forEach(t),ov=d(no),Ji=n(no,"P",{});var R$=r(Ji);nv=i(R$,"Examples:"),R$.forEach(t),rv=d(no),p(Yn.$$.fragment,no),no.forEach(t),D.forEach(t),Ol=d(a),Qe=n(a,"H2",{class:!0});var Yd=r(Qe);Jt=n(Yd,"A",{id:!0,class:!0,href:!0});var Y$=r(Jt);Qi=n(Y$,"SPAN",{});var K$=r(Qi);p(Kn.$$.fragment,K$),K$.forEach(t),Y$.forEach(t),av=d(Yd),Zi=n(Yd,"SPAN",{});var J$=r(Zi);sv=i(J$,"Pushing to the Hub"),J$.forEach(t),Yd.forEach(t),Ul=d(a),ce=n(a,"DIV",{class:!0});var Jr=r(ce);p(Jn.$$.fragment,Jr),iv=d(Jr),el=n(Jr,"P",{});var Q$=r(el);lv=i(Q$,"A Mixin containing the functionality to push a model or tokenizer to the hub."),Q$.forEach(t),dv=d(Jr),se=n(Jr,"DIV",{class:!0});var ro=r(se);p(Qn.$$.fragment,ro),mv=d(ro),Zn=n(ro,"P",{});var Kd=r(Zn);cv=i(Kd,`Upload the {object_files} to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),tl=n(Kd,"CODE",{});var Z$=r(tl);pv=i(Z$,"repo_path_or_name"),Z$.forEach(t),hv=i(Kd,"."),Kd.forEach(t),fv=d(ro),ol=n(ro,"P",{});var ew=r(ol);uv=i(ew,"Examples:"),ew.forEach(t),gv=d(ro),p(er.$$.fragment,ro),ro.forEach(t),Jr.forEach(t),this.h()},h(){m(w,"name","hf:doc:metadata"),m(w,"content",JSON.stringify(cw)),m(E,"id","models"),m(E,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(E,"href","#models"),m(x,"class","relative 
group"),m(sr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),m(ir,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),m(lr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),m(dr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),m(mr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),m(cr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.modeling_utils.ModuleUtilsMixin"),m(pr,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin"),m(hr,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin"),m(fr,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin"),m(et,"id","transformers.PreTrainedModel"),m(et,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(et,"href","#transformers.PreTrainedModel"),m(ze,"class","relative 
group"),m(gr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),m(_r,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),m(br,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),m(vr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel"),m(ee,"class","docstring"),m(F,"class","docstring"),m(lt,"class","docstring"),m(dt,"class","docstring"),m(be,"class","docstring"),m(ve,"class","docstring"),m(mt,"class","docstring"),m(ct,"class","docstring"),m(pt,"class","docstring"),m(ye,"class","docstring"),m($e,"class","docstring"),m(ft,"class","docstring"),m(ut,"class","docstring"),m(we,"class","docstring"),m(T,"class","docstring"),m(yr,"id","from_pretrained-torch-dtype"),m(gt,"id","model-instantiation-dtype"),m(gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(gt,"href","#model-instantiation-dtype"),m(Ne,"class","relative group"),m(bt,"id","transformers.modeling_utils.ModuleUtilsMixin"),m(bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(bt,"href","#transformers.modeling_utils.ModuleUtilsMixin"),m(He,"class","relative group"),m(xe,"class","docstring"),m(vt,"class","docstring"),m(Vo,"href","https://arxiv.org/pdf/2001.08361.pdf"),m(Vo,"rel","nofollow"),m(yt,"class","docstring"),m($t,"class","docstring"),m(wt,"class","docstring"),m(Tt,"class","docstring"),m(xt,"class","docstring"),m(wr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks"),m(Pt,"class","docstring"),m(j,"class","docstring"),m(Et,"id","transformers.TFPreTrainedModel"),m(Et,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Et,"href","#transformers.TFPreTrainedModel"),m(Se,"class","relative group"),m(xr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel"),m(Pr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),m(Er,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),m(oe,"class","docstring"),m(kt,"class","docstring"),m(C,"class","docstring"),m(Dt,"class","docstring"),m(Ft,"class","docstring"),m(qt,"class","docstring"),m(At,"class","docstring"),m(Ct,"class","docstring"),m(It,"class","docstring"),m(Lt,"class","docstring"),m(zt,"class","docstring"),m(Ee,"class","docstring"),m(Mr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained"),m(Ot,"class","docstring"),m(Ut,"class","docstring"),m(Nt,"class","docstring"),m(Ht,"class","docstring"),m(Xt,"class","docstring"),m(Bt,"class","docstring"),m(Vt,"class","docstring"),m(ne,"class","docstring"),m(b,"class","docstring"),m(St,"id","transformers.modeling_tf_utils.TFModelUtilsMixin"),m(St,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(St,"href","#transformers.modeling_tf_utils.TFModelUtilsMixin"),m(Ye,"class","relative group"),m(Wt,"class","docstring"),m(me,"class","docstring"),m(Gt,"id","transformers.FlaxPreTrainedModel"),m(Gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Gt,"href","#transformers.FlaxPreTrainedModel"),m(Ke,"class","relative 
group"),m(jr,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel"),m(Dr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),m(Fr,"href","/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig"),m(re,"class","docstring"),m(O,"class","docstring"),m(ke,"class","docstring"),m(Kt,"class","docstring"),m(X,"class","docstring"),m(B,"class","docstring"),m(ae,"class","docstring"),m(M,"class","docstring"),m(Jt,"id","transformers.file_utils.PushToHubMixin"),m(Jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Jt,"href","#transformers.file_utils.PushToHubMixin"),m(Qe,"class","relative group"),m(se,"class","docstring"),m(ce,"class","docstring")},m(a,_){e(document.head,w),y(a,k,_),y(a,x,_),e(x,E),e(E,W),h(A,W,null),e(x,L),e(x,de),e(de,Jd),y(a,ul,_),y(a,Z,_),e(Z,Qd),e(Z,sr),e(sr,Zd),e(Z,em),e(Z,ir),e(ir,tm),e(Z,om),e(Z,lr),e(lr,nm),e(Z,rm),y(a,gl,_),y(a,Le,_),e(Le,dr),e(dr,am),e(Le,sm),e(Le,mr),e(mr,im),e(Le,lm),y(a,_l,_),y(a,Ze,_),e(Ze,Qr),e(Qr,dm),e(Ze,mm),e(Ze,Zr),e(Zr,cm),y(a,bl,_),y(a,z,_),e(z,pm),e(z,cr),e(cr,hm),e(z,fm),e(z,ea),e(ea,um),e(z,gm),e(z,pr),e(pr,_m),e(z,bm),e(z,hr),e(hr,vm),e(z,ym),e(z,fr),e(fr,$m),e(z,wm),y(a,vl,_),y(a,ze,_),e(ze,et),e(et,ta),h(so,ta,null),e(ze,Tm),e(ze,oa),e(oa,xm),y(a,yl,_),y(a,T,_),h(io,T,null),e(T,Pm),e(T,na),e(na,Em),e(T,Mm),e(T,ur),e(ur,gr),e(gr,km),e(ur,jm),e(T,Dm),e(T,lo),e(lo,ra),e(ra,Fm),e(lo,qm),e(lo,aa),e(aa,Am),e(T,Cm),e(T,sa),e(sa,Im),e(T,Lm),e(T,G),e(G,ia),e(ia,ge),e(ge,la),e(la,zm),e(ge,Om),e(ge,_r),e(_r,Um),e(ge,Nm),e(ge,br),e(br,Hm),e(ge,Xm),e(G,Bm),e(G,mo),e(mo,_e),e(_e,da),e(da,Vm),e(_e,Sm),e(_e,ma),e(ma,Wm),e(_e,Gm),e(_e,ca),e(ca,Rm),e(_e,Ym),e(mo,Km),e(mo,Oe),e(Oe,tt),e(tt,pa),e(pa,Jm),e(tt,Qm),e(tt,vr),e(vr,Zm),e(tt,ec),e(Oe,tc),e(Oe,ot),e(ot,ha),e(ha,oc),e(ot,nc),e(ot,fa),e(fa,rc
),e(ot,ac),e(Oe,sc),e(Oe,nt),e(nt,ua),e(ua,ic),e(nt,lc),e(nt,ga),e(ga,dc),e(nt,mc),e(G,cc),e(G,_a),e(_a,rt),e(rt,ba),e(ba,pc),e(rt,hc),e(rt,va),e(va,fc),e(rt,uc),e(G,gc),e(G,ya),e(ya,at),e(at,$a),e($a,_c),e(at,bc),e(at,wa),e(wa,vc),e(at,yc),e(G,$c),e(G,Ta),e(Ta,U),e(U,xa),e(xa,wc),e(U,Tc),e(U,Pa),e(Pa,xc),e(U,Pc),e(U,Ea),e(Ea,Ec),e(U,Mc),e(U,Ma),e(Ma,kc),e(U,jc),e(U,ka),e(ka,Dc),e(U,Fc),e(T,qc),e(T,ee),h(co,ee,null),e(ee,Ac),e(ee,po),e(po,Cc),e(po,ja),e(ja,Ic),e(po,Lc),e(ee,zc),e(ee,Da),e(Da,Oc),e(ee,Uc),h(ho,ee,null),e(T,Nc),e(T,F),h(fo,F,null),e(F,Hc),e(F,Fa),e(Fa,Xc),e(F,Bc),e(F,Ue),e(Ue,Vc),e(Ue,qa),e(qa,Sc),e(Ue,Wc),e(Ue,Aa),e(Aa,Gc),e(Ue,Rc),e(F,Yc),e(F,uo),e(uo,Kc),e(uo,Ca),e(Ca,Jc),e(uo,Qc),e(F,Zc),e(F,go),e(go,ep),e(go,Ia),e(Ia,tp),e(go,op),e(F,np),h(st,F,null),e(F,rp),h(it,F,null),e(F,ap),e(F,La),e(La,sp),e(F,ip),h(_o,F,null),e(T,lp),e(T,lt),h(bo,lt,null),e(lt,dp),e(lt,za),e(za,mp),e(T,cp),e(T,dt),h(vo,dt,null),e(dt,pp),e(dt,Oa),e(Oa,hp),e(T,fp),e(T,be),h(yo,be,null),e(be,up),e(be,Ua),e(Ua,gp),e(be,_p),e(be,Na),e(Na,bp),e(T,vp),e(T,ve),h($o,ve,null),e(ve,yp),e(ve,Ha),e(Ha,$p),e(ve,wp),e(ve,Xa),e(Xa,Tp),e(T,xp),e(T,mt),h(wo,mt,null),e(mt,Pp),e(mt,Ba),e(Ba,Ep),e(T,Mp),e(T,ct),h(To,ct,null),e(ct,kp),e(ct,Va),e(Va,jp),e(T,Dp),e(T,pt),h(xo,pt,null),e(pt,Fp),e(pt,Sa),e(Sa,qp),e(T,Ap),e(T,ye),h(Po,ye,null),e(ye,Cp),e(ye,Wa),e(Wa,Ip),e(ye,Lp),h(ht,ye,null),e(T,zp),e(T,$e),h(Eo,$e,null),e($e,Op),e($e,Mo),e(Mo,Up),e(Mo,Ga),e(Ga,Np),e(Mo,Hp),e($e,Xp),e($e,ko),e(ko,Bp),e(ko,Ra),e(Ra,Vp),e(ko,Sp),e(T,Wp),e(T,ft),h(jo,ft,null),e(ft,Gp),e(ft,Do),e(Do,Rp),e(Do,Ya),e(Ya,Yp),e(Do,Kp),e(T,Jp),e(T,ut),h(Fo,ut,null),e(ut,Qp),e(ut,Ka),e(Ka,Zp),e(T,eh),e(T,we),h(qo,we,null),e(we,th),e(we,Ja),e(Ja,oh),e(we,nh),e(we,Ao),e(Ao,rh),e(Ao,Qa),e(Qa,ah),e(Ao,sh),y(a,$l,_),y(a,yr,_),y(a,wl,_),y(a,Ne,_),e(Ne,gt),e(gt,Za),h(Co,Za,null),e(Ne,ih),e(Ne,es),e(es,lh),y(a,Tl,_),y(a,te,_),e(te,dh),e(te,ts),e(ts,mh),e(te,ch),e(te,os),e(os,ph),e(te,hh),e(te,ns),e(ns,fh),e(te,uh),y(a,xl,_),h(Io,a,_),
y(a,Pl,_),y(a,Te,_),e(Te,gh),e(Te,rs),e(rs,_h),e(Te,bh),e(Te,as),e(as,vh),e(Te,yh),y(a,El,_),h(Lo,a,_),y(a,Ml,_),y(a,_t,_),e(_t,$h),e(_t,ss),e(ss,wh),e(_t,Th),y(a,kl,_),h(zo,a,_),y(a,jl,_),y(a,$r,_),e($r,xh),y(a,Dl,_),y(a,He,_),e(He,bt),e(bt,is),h(Oo,is,null),e(He,Ph),e(He,ls),e(ls,Eh),y(a,Fl,_),y(a,j,_),h(Uo,j,null),e(j,Mh),e(j,No),e(No,kh),e(No,ds),e(ds,jh),e(No,Dh),e(j,Fh),e(j,xe),h(Ho,xe,null),e(xe,qh),e(xe,ms),e(ms,Ah),e(xe,Ch),e(xe,Xe),e(Xe,Ih),e(Xe,cs),e(cs,Lh),e(Xe,zh),e(Xe,ps),e(ps,Oh),e(Xe,Uh),e(j,Nh),e(j,vt),h(Xo,vt,null),e(vt,Hh),e(vt,hs),e(hs,Xh),e(j,Bh),e(j,yt),h(Bo,yt,null),e(yt,Vh),e(yt,Be),e(Be,Sh),e(Be,fs),e(fs,Wh),e(Be,Gh),e(Be,Vo),e(Vo,Rh),e(Be,Yh),e(j,Kh),e(j,$t),h(So,$t,null),e($t,Jh),e($t,us),e(us,Qh),e(j,Zh),e(j,wt),h(Wo,wt,null),e(wt,ef),e(wt,gs),e(gs,tf),e(j,of),e(j,Tt),h(Go,Tt,null),e(Tt,nf),e(Tt,_s),e(_s,rf),e(j,af),e(j,xt),h(Ro,xt,null),e(xt,sf),e(xt,bs),e(bs,lf),e(j,df),e(j,Pt),h(Yo,Pt,null),e(Pt,mf),e(Pt,Ve),e(Ve,cf),e(Ve,vs),e(vs,pf),e(Ve,hf),e(Ve,wr),e(wr,ff),e(Ve,uf),y(a,ql,_),y(a,Se,_),e(Se,Et),e(Et,ys),h(Ko,ys,null),e(Se,gf),e(Se,$s),e($s,_f),y(a,Al,_),y(a,b,_),h(Jo,b,null),e(b,bf),e(b,ws),e(ws,vf),e(b,yf),e(b,Tr),e(Tr,xr),e(xr,$f),e(Tr,wf),e(b,Tf),e(b,Qo),e(Qo,Ts),e(Ts,xf),e(Qo,Pf),e(Qo,xs),e(xs,Ef),e(b,Mf),e(b,Ps),e(Ps,kf),e(b,jf),e(b,We),e(We,Pe),e(Pe,Es),e(Es,Df),e(Pe,Ff),e(Pe,Pr),e(Pr,qf),e(Pe,Af),e(Pe,Er),e(Er,Cf),e(Pe,If),e(We,Lf),e(We,Mt),e(Mt,Ms),e(Ms,zf),e(Mt,Of),e(Mt,ks),e(ks,Uf),e(Mt,Nf),e(We,Hf),e(We,N),e(N,js),e(js,Xf),e(N,Bf),e(N,Ds),e(Ds,Vf),e(N,Sf),e(N,Fs),e(Fs,Wf),e(N,Gf),e(N,qs),e(qs,Rf),e(N,Yf),e(N,As),e(As,Kf),e(N,Jf),e(b,Qf),e(b,oe),h(Zo,oe,null),e(oe,Zf),e(oe,en),e(en,eu),e(en,Cs),e(Cs,tu),e(en,ou),e(oe,nu),e(oe,Is),e(Is,ru),e(oe,au),h(tn,oe,null),e(b,su),e(b,kt),h(on,kt,null),e(kt,iu),e(kt,Ls),e(Ls,lu),e(b,du),e(b,C),h(nn,C,null),e(C,mu),e(C,zs),e(zs,cu),e(C,pu),e(C,rn),e(rn,hu),e(rn,Os),e(Os,fu),e(rn,uu),e(C,gu),e(C,an),e(an,_u),e(an,Us),e(Us,bu),e(an,vu),e(C,yu),h(jt,C,null),e(C,$u),e(C,Ns),e(Ns,wu),e(C,T
u),h(sn,C,null),e(b,xu),e(b,Dt),h(ln,Dt,null),e(Dt,Pu),e(Dt,Hs),e(Hs,Eu),e(b,Mu),e(b,Ft),h(dn,Ft,null),e(Ft,ku),e(Ft,Xs),e(Xs,ju),e(b,Du),e(b,qt),h(mn,qt,null),e(qt,Fu),e(qt,Bs),e(Bs,qu),e(b,Au),e(b,At),h(cn,At,null),e(At,Cu),e(At,Vs),e(Vs,Iu),e(b,Lu),e(b,Ct),h(pn,Ct,null),e(Ct,zu),e(Ct,Ss),e(Ss,Ou),e(b,Uu),e(b,It),h(hn,It,null),e(It,Nu),e(It,Ws),e(Ws,Hu),e(b,Xu),e(b,Lt),h(fn,Lt,null),e(Lt,Bu),e(Lt,Gs),e(Gs,Vu),e(b,Su),e(b,zt),h(un,zt,null),e(zt,Wu),e(zt,Rs),e(Rs,Gu),e(b,Ru),e(b,Ee),h(gn,Ee,null),e(Ee,Yu),e(Ee,_n),e(_n,Ku),e(_n,Ys),e(Ys,Ju),e(_n,Qu),e(Ee,Zu),e(Ee,bn),e(bn,eg),e(bn,Ks),e(Ks,tg),e(bn,og),e(b,ng),e(b,Ot),h(vn,Ot,null),e(Ot,rg),e(Ot,yn),e(yn,ag),e(yn,Mr),e(Mr,sg),e(yn,ig),e(b,lg),e(b,Ut),h($n,Ut,null),e(Ut,dg),e(Ut,Js),e(Js,mg),e(b,cg),e(b,Nt),h(wn,Nt,null),e(Nt,pg),e(Nt,Qs),e(Qs,hg),e(b,fg),e(b,Ht),h(Tn,Ht,null),e(Ht,ug),e(Ht,Zs),e(Zs,gg),e(b,_g),e(b,Xt),h(xn,Xt,null),e(Xt,bg),e(Xt,ei),e(ei,vg),e(b,yg),e(b,Bt),h(Pn,Bt,null),e(Bt,$g),e(Bt,ti),e(ti,wg),e(b,Tg),e(b,Vt),h(En,Vt,null),e(Vt,xg),e(Vt,oi),e(oi,Pg),e(b,Eg),e(b,ne),h(Mn,ne,null),e(ne,Mg),e(ne,R),e(R,kg),e(R,ni),e(ni,jg),e(R,Dg),e(R,ri),e(ri,Fg),e(R,qg),e(R,ai),e(ai,Ag),e(R,Cg),e(R,si),e(si,Ig),e(R,Lg),e(ne,zg),e(ne,Ge),e(Ge,Og),e(Ge,ii),e(ii,Ug),e(Ge,Ng),e(Ge,li),e(li,Hg),e(Ge,Xg),e(ne,Bg),e(ne,Re),e(Re,Vg),e(Re,di),e(di,Sg),e(Re,Wg),e(Re,mi),e(mi,Gg),e(Re,Rg),y(a,Cl,_),y(a,Ye,_),e(Ye,St),e(St,ci),h(kn,ci,null),e(Ye,Yg),e(Ye,pi),e(pi,Kg),y(a,Il,_),y(a,me,_),h(jn,me,null),e(me,Jg),e(me,Dn),e(Dn,Qg),e(Dn,hi),e(hi,Zg),e(Dn,e_),e(me,t_),e(me,Wt),h(Fn,Wt,null),e(Wt,o_),e(Wt,fi),e(fi,n_),y(a,Ll,_),y(a,Ke,_),e(Ke,Gt),e(Gt,ui),h(qn,ui,null),e(Ke,r_),e(Ke,gi),e(gi,a_),y(a,zl,_),y(a,M,_),h(An,M,null),e(M,s_),e(M,_i),e(_i,i_),e(M,l_),e(M,kr),e(kr,jr),e(jr,d_),e(kr,m_),e(M,c_),e(M,bi),e(bi,p_),e(M,h_),e(M,Je),e(Je,Me),e(Me,vi),e(vi,f_),e(Me,u_),e(Me,Dr),e(Dr,g_),e(Me,__),e(Me,Fr),e(Fr,b_),e(Me,v_),e(Je,y_),e(Je,Rt),e(Rt,yi),e(yi,$_),e(Rt,w_),e(Rt,$i),e($i,T_),e(Rt,x_),e(Je,P_),e(Je,H),e(H,wi),e(wi,E_),e(H,M
_),e(H,Ti),e(Ti,k_),e(H,j_),e(H,xi),e(xi,D_),e(H,F_),e(H,Pi),e(Pi,q_),e(H,A_),e(H,Ei),e(Ei,C_),e(H,I_),e(M,L_),e(M,re),h(Cn,re,null),e(re,z_),e(re,In),e(In,O_),e(In,Mi),e(Mi,U_),e(In,N_),e(re,H_),e(re,ki),e(ki,X_),e(re,B_),h(Ln,re,null),e(M,V_),e(M,O),h(zn,O,null),e(O,S_),e(O,ji),e(ji,W_),e(O,G_),e(O,On),e(On,R_),e(On,Di),e(Di,Y_),e(On,K_),e(O,J_),e(O,Un),e(Un,Q_),e(Un,Fi),e(Fi,Z_),e(Un,eb),e(O,tb),e(O,qi),e(qi,ob),e(O,nb),h(Nn,O,null),e(M,rb),e(M,ke),h(Hn,ke,null),e(ke,ab),e(ke,Ai),e(Ai,sb),e(ke,ib),h(Yt,ke,null),e(M,lb),e(M,Kt),h(Xn,Kt,null),e(Kt,db),e(Kt,Bn),e(Bn,mb),e(Bn,Ci),e(Ci,cb),e(Bn,pb),e(M,hb),e(M,X),h(Vn,X,null),e(X,fb),e(X,Y),e(Y,ub),e(Y,Ii),e(Ii,gb),e(Y,_b),e(Y,Li),e(Li,bb),e(Y,vb),e(Y,zi),e(zi,yb),e(Y,$b),e(Y,Oi),e(Oi,wb),e(Y,Tb),e(X,xb),e(X,Ui),e(Ui,Pb),e(X,Eb),e(X,Ni),e(Ni,Mb),e(X,kb),h(Sn,X,null),e(M,jb),e(M,B),h(Wn,B,null),e(B,Db),e(B,K),e(K,Fb),e(K,Hi),e(Hi,qb),e(K,Ab),e(K,Xi),e(Xi,Cb),e(K,Ib),e(K,Bi),e(Bi,Lb),e(K,zb),e(K,Vi),e(Vi,Ob),e(K,Ub),e(B,Nb),e(B,Si),e(Si,Hb),e(B,Xb),e(B,Wi),e(Wi,Bb),e(B,Vb),h(Gn,B,null),e(M,Sb),e(M,ae),h(Rn,ae,null),e(ae,Wb),e(ae,J),e(J,Gb),e(J,Gi),e(Gi,Rb),e(J,Yb),e(J,Ri),e(Ri,Kb),e(J,Jb),e(J,Yi),e(Yi,Qb),e(J,Zb),e(J,Ki),e(Ki,ev),e(J,tv),e(ae,ov),e(ae,Ji),e(Ji,nv),e(ae,rv),h(Yn,ae,null),y(a,Ol,_),y(a,Qe,_),e(Qe,Jt),e(Jt,Qi),h(Kn,Qi,null),e(Qe,av),e(Qe,Zi),e(Zi,sv),y(a,Ul,_),y(a,ce,_),h(Jn,ce,null),e(ce,iv),e(ce,el),e(el,lv),e(ce,dv),e(ce,se),h(Qn,se,null),e(se,mv),e(se,Zn),e(Zn,cv),e(Zn,tl),e(tl,pv),e(Zn,hv),e(se,fv),e(se,ol),e(ol,uv),e(se,gv),h(er,se,null),Nl=!0},p(a,[_]){const tr={};_&2&&(tr.$$scope={dirty:_,ctx:a}),st.$set(tr);const nl={};_&2&&(nl.$$scope={dirty:_,ctx:a}),it.$set(nl);const rl={};_&2&&(rl.$$scope={dirty:_,ctx:a}),ht.$set(rl);const al={};_&2&&(al.$$scope={dirty:_,ctx:a}),jt.$set(al);const 
pe={};_&2&&(pe.$$scope={dirty:_,ctx:a}),Yt.$set(pe)},i(a){Nl||(f(A.$$.fragment,a),f(so.$$.fragment,a),f(io.$$.fragment,a),f(co.$$.fragment,a),f(ho.$$.fragment,a),f(fo.$$.fragment,a),f(st.$$.fragment,a),f(it.$$.fragment,a),f(_o.$$.fragment,a),f(bo.$$.fragment,a),f(vo.$$.fragment,a),f(yo.$$.fragment,a),f($o.$$.fragment,a),f(wo.$$.fragment,a),f(To.$$.fragment,a),f(xo.$$.fragment,a),f(Po.$$.fragment,a),f(ht.$$.fragment,a),f(Eo.$$.fragment,a),f(jo.$$.fragment,a),f(Fo.$$.fragment,a),f(qo.$$.fragment,a),f(Co.$$.fragment,a),f(Io.$$.fragment,a),f(Lo.$$.fragment,a),f(zo.$$.fragment,a),f(Oo.$$.fragment,a),f(Uo.$$.fragment,a),f(Ho.$$.fragment,a),f(Xo.$$.fragment,a),f(Bo.$$.fragment,a),f(So.$$.fragment,a),f(Wo.$$.fragment,a),f(Go.$$.fragment,a),f(Ro.$$.fragment,a),f(Yo.$$.fragment,a),f(Ko.$$.fragment,a),f(Jo.$$.fragment,a),f(Zo.$$.fragment,a),f(tn.$$.fragment,a),f(on.$$.fragment,a),f(nn.$$.fragment,a),f(jt.$$.fragment,a),f(sn.$$.fragment,a),f(ln.$$.fragment,a),f(dn.$$.fragment,a),f(mn.$$.fragment,a),f(cn.$$.fragment,a),f(pn.$$.fragment,a),f(hn.$$.fragment,a),f(fn.$$.fragment,a),f(un.$$.fragment,a),f(gn.$$.fragment,a),f(vn.$$.fragment,a),f($n.$$.fragment,a),f(wn.$$.fragment,a),f(Tn.$$.fragment,a),f(xn.$$.fragment,a),f(Pn.$$.fragment,a),f(En.$$.fragment,a),f(Mn.$$.fragment,a),f(kn.$$.fragment,a),f(jn.$$.fragment,a),f(Fn.$$.fragment,a),f(qn.$$.fragment,a),f(An.$$.fragment,a),f(Cn.$$.fragment,a),f(Ln.$$.fragment,a),f(zn.$$.fragment,a),f(Nn.$$.fragment,a),f(Hn.$$.fragment,a),f(Yt.$$.fragment,a),f(Xn.$$.fragment,a),f(Vn.$$.fragment,a),f(Sn.$$.fragment,a),f(Wn.$$.fragment,a),f(Gn.$$.fragment,a),f(Rn.$$.fragment,a),f(Yn.$$.fragment,a),f(Kn.$$.fragment,a),f(Jn.$$.fragment,a),f(Qn.$$.fragment,a),f(er.$$.fragment,a),Nl=!0)},o(a){u(A.$$.fragment,a),u(so.$$.fragment,a),u(io.$$.fragment,a),u(co.$$.fragment,a),u(ho.$$.fragment,a),u(fo.$$.fragment,a),u(st.$$.fragment,a),u(it.$$.fragment,a),u(_o.$$.fragment,a),u(bo.$$.fragment,a),u(vo.$$.fragment,a),u(yo.$$.fragment,a),u($o.$$.fragment,a),u(wo.$
$.fragment,a),u(To.$$.fragment,a),u(xo.$$.fragment,a),u(Po.$$.fragment,a),u(ht.$$.fragment,a),u(Eo.$$.fragment,a),u(jo.$$.fragment,a),u(Fo.$$.fragment,a),u(qo.$$.fragment,a),u(Co.$$.fragment,a),u(Io.$$.fragment,a),u(Lo.$$.fragment,a),u(zo.$$.fragment,a),u(Oo.$$.fragment,a),u(Uo.$$.fragment,a),u(Ho.$$.fragment,a),u(Xo.$$.fragment,a),u(Bo.$$.fragment,a),u(So.$$.fragment,a),u(Wo.$$.fragment,a),u(Go.$$.fragment,a),u(Ro.$$.fragment,a),u(Yo.$$.fragment,a),u(Ko.$$.fragment,a),u(Jo.$$.fragment,a),u(Zo.$$.fragment,a),u(tn.$$.fragment,a),u(on.$$.fragment,a),u(nn.$$.fragment,a),u(jt.$$.fragment,a),u(sn.$$.fragment,a),u(ln.$$.fragment,a),u(dn.$$.fragment,a),u(mn.$$.fragment,a),u(cn.$$.fragment,a),u(pn.$$.fragment,a),u(hn.$$.fragment,a),u(fn.$$.fragment,a),u(un.$$.fragment,a),u(gn.$$.fragment,a),u(vn.$$.fragment,a),u($n.$$.fragment,a),u(wn.$$.fragment,a),u(Tn.$$.fragment,a),u(xn.$$.fragment,a),u(Pn.$$.fragment,a),u(En.$$.fragment,a),u(Mn.$$.fragment,a),u(kn.$$.fragment,a),u(jn.$$.fragment,a),u(Fn.$$.fragment,a),u(qn.$$.fragment,a),u(An.$$.fragment,a),u(Cn.$$.fragment,a),u(Ln.$$.fragment,a),u(zn.$$.fragment,a),u(Nn.$$.fragment,a),u(Hn.$$.fragment,a),u(Yt.$$.fragment,a),u(Xn.$$.fragment,a),u(Vn.$$.fragment,a),u(Sn.$$.fragment,a),u(Wn.$$.fragment,a),u(Gn.$$.fragment,a),u(Rn.$$.fragment,a),u(Yn.$$.fragment,a),u(Kn.$$.fragment,a),u(Jn.$$.fragment,a),u(Qn.$$.fragment,a),u(er.$$.fragment,a),Nl=!1},d(a){t(w),a&&t(k),a&&t(x),g(A),a&&t(ul),a&&t(Z),a&&t(gl),a&&t(Le),a&&t(_l),a&&t(Ze),a&&t(bl),a&&t(z),a&&t(vl),a&&t(ze),g(so),a&&t(yl),a&&t(T),g(io),g(co),g(ho),g(fo),g(st),g(it),g(_o),g(bo),g(vo),g(yo),g($o),g(wo),g(To),g(xo),g(Po),g(ht),g(Eo),g(jo),g(Fo),g(qo),a&&t($l),a&&t(yr),a&&t(wl),a&&t(Ne),g(Co),a&&t(Tl),a&&t(te),a&&t(xl),g(Io,a),a&&t(Pl),a&&t(Te),a&&t(El),g(Lo,a),a&&t(Ml),a&&t(_t),a&&t(kl),g(zo,a),a&&t(jl),a&&t($r),a&&t(Dl),a&&t(He),g(Oo),a&&t(Fl),a&&t(j),g(Uo),g(Ho),g(Xo),g(Bo),g(So),g(Wo),g(Go),g(Ro),g(Yo),a&&t(ql),a&&t(Se),g(Ko),a&&t(Al),a&&t(b),g(Jo),g(Zo),g(tn),g(on),g(nn),g(jt),
g(sn),g(ln),g(dn),g(mn),g(cn),g(pn),g(hn),g(fn),g(un),g(gn),g(vn),g($n),g(wn),g(Tn),g(xn),g(Pn),g(En),g(Mn),a&&t(Cl),a&&t(Ye),g(kn),a&&t(Il),a&&t(me),g(jn),g(Fn),a&&t(Ll),a&&t(Ke),g(qn),a&&t(zl),a&&t(M),g(An),g(Cn),g(Ln),g(zn),g(Nn),g(Hn),g(Yt),g(Xn),g(Vn),g(Sn),g(Wn),g(Gn),g(Rn),g(Yn),a&&t(Ol),a&&t(Qe),g(Kn),a&&t(Ul),a&&t(ce),g(Jn),g(Qn),g(er)}}}const cw={local:"models",sections:[{local:"transformers.PreTrainedModel",sections:[{local:"model-instantiation-dtype",title:"Model Instantiation dtype"}],title:"PreTrainedModel"},{local:"transformers.modeling_utils.ModuleUtilsMixin",title:"ModuleUtilsMixin"},{local:"transformers.TFPreTrainedModel",title:"TFPreTrainedModel"},{local:"transformers.modeling_tf_utils.TFModelUtilsMixin",title:"TFModelUtilsMixin"},{local:"transformers.FlaxPreTrainedModel",title:"FlaxPreTrainedModel"},{local:"transformers.file_utils.PushToHubMixin",title:"Pushing to the Hub"}],title:"Models"};function pw(S,w,k){let{fw:x}=w;return S.$$set=E=>{"fw"in E&&k(0,x=E.fw)},[x]}class vw extends tw{constructor(w){super();ow(this,w,pw,mw,nw,{fw:0})}}export{vw as default,cw as metadata};
402
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/main_classes/callback.mdx-9aed4ad7.js
import{S as np,i as sp,s as op,e as s,k as c,w as _,t as r,M as lp,c as o,d as t,m as f,a as l,x as u,h as n,b as i,F as e,g as m,y as v,q as b,o as E,B as k}from"../../chunks/vendor-4833417e.js";import{T as ip}from"../../chunks/Tip-fffd6df1.js";import{D as T}from"../../chunks/Docstring-4f315ed9.js";import{C as Wh}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as Pn}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function cp(hr){let w,Y,D,P,Z,M,et,ee;return{c(){w=s("p"),Y=r(`In all this class, one step is to be understood as one update step. When using gradient accumulation, one update step may require several forward and backward passes: if you use `),D=s("code"),P=r("gradient_accumulation_steps=n"),Z=r(`, then one update step requires going through `),M=s("em"),et=r("n"),ee=r(" batches.")},l(te){w=o(te,"P",{});var x=l(w);Y=n(x,`In all this class, one step is to be understood as one update step. When using gradient accumulation, one update step may require several forward and backward passes: if you use `),D=o(x,"CODE",{});var q=l(D);P=n(q,"gradient_accumulation_steps=n"),q.forEach(t),Z=n(x,`, then one update step requires going through `),M=o(x,"EM",{});var fa=l(M);et=n(fa,"n"),fa.forEach(t),ee=n(x," batches."),x.forEach(t)},m(te,x){m(te,w,x),e(w,Y),e(w,D),e(D,P),e(w,Z),e(w,M),e(M,et),e(w,ee)},d(te){te&&t(w)}}}function fp(hr){let 
w,Y,D,P,Z,M,et,ee,te,x,q,fa,ha,Gs,Js,On,j,Ys,ma,Xs,Ks,da,Qs,Zs,pa,eo,to,In,ke,ao,ga,ro,no,Mn,y,_a,ua,so,oo,lo,X,va,io,co,ba,fo,ho,Ea,mo,po,go,ka,$a,_o,uo,vo,$e,Ta,bo,Eo,tt,ko,$o,To,Te,Ca,Co,wo,at,yo,Ao,Do,Ce,wa,Lo,So,rt,Po,Oo,Io,we,ya,Mo,xo,nt,Fo,jo,No,ye,Aa,Wo,zo,st,Bo,Vo,xn,L,qo,Da,Ro,Ho,La,Uo,Go,Sa,Jo,Yo,Pa,Xo,Ko,Oa,Qo,Zo,Fn,ae,Ae,mr,ot,el,dr,tl,jn,De,al,Ia,rl,nl,Nn,R,lt,sl,re,ol,Ma,ll,il,it,cl,fl,hl,N,ct,ml,pr,dl,pl,C,gl,gr,_l,ul,_r,vl,bl,ur,El,kl,vr,$l,Tl,br,Cl,wl,Er,yl,Al,kr,Dl,Ll,$r,Sl,Pl,Tr,Ol,Il,Ml,ft,xl,ht,Fl,jl,Wn,ne,mt,Nl,dt,Wl,xa,zl,Bl,zn,se,pt,Vl,gt,ql,Fa,Rl,Hl,Bn,oe,_t,Ul,ut,Gl,ja,Jl,Yl,Vn,H,vt,Xl,bt,Kl,Na,Ql,Zl,ei,U,ti,Wa,ai,ri,Cr,ni,si,za,oi,li,qn,le,Et,ii,ie,ci,Ba,fi,hi,kt,mi,di,Rn,G,$t,pi,ce,gi,Va,_i,ui,Tt,vi,bi,Ei,W,Ct,ki,wt,$i,wr,Ti,Ci,wi,yt,yi,At,Ai,Di,Li,d,Si,yr,Pi,Oi,Ar,Ii,Mi,Dr,xi,Fi,Lr,ji,Ni,Sr,Wi,zi,Pr,Bi,Vi,Or,qi,Ri,Ir,Hi,Ui,Mr,Gi,Ji,xr,Yi,Xi,Fr,Ki,Qi,jr,Zi,ec,Nr,tc,ac,Wr,rc,nc,zr,sc,oc,Br,lc,ic,Vr,cc,fc,qr,hc,mc,Rr,dc,pc,Hn,J,Dt,gc,fe,_c,qa,uc,vc,Lt,bc,Ec,kc,z,St,$c,Hr,Tc,Cc,he,wc,Ur,yc,Ac,Gr,Dc,Lc,Sc,F,Pc,Jr,Oc,Ic,Yr,Mc,xc,Ra,Fc,jc,Xr,Nc,Wc,Un,me,Pt,zc,de,Bc,Ha,Vc,qc,Ot,Rc,Hc,Gn,pe,It,Uc,Mt,Gc,Ua,Jc,Yc,Jn,ge,Le,Kr,xt,Xc,Qr,Kc,Yn,p,Ft,Qc,Zr,Zc,ef,jt,tf,en,af,rf,nf,O,sf,tn,of,lf,an,cf,ff,rn,hf,mf,nn,df,pf,sn,gf,_f,uf,on,vf,bf,Nt,Ef,Se,Wt,kf,ln,$f,Tf,Pe,zt,Cf,cn,wf,yf,Oe,Bt,Af,fn,Df,Lf,Ie,Vt,Sf,qt,Pf,Ga,Of,If,Mf,Me,Rt,xf,hn,Ff,jf,xe,Ht,Nf,mn,Wf,zf,Fe,Ut,Bf,dn,Vf,qf,je,Gt,Rf,pn,Hf,Uf,Ne,Jt,Gf,gn,Jf,Yf,We,Yt,Xf,_n,Kf,Qf,ze,Xt,Zf,un,eh,th,Be,Kt,ah,vn,rh,Xn,Ve,nh,Ja,sh,oh,Kn,Qt,Qn,qe,lh,bn,ih,ch,Zn,Zt,es,_e,Re,En,ea,fh,kn,hh,ts,I,ta,mh,ue,dh,Ya,ph,gh,Xa,_h,uh,vh,He,bh,Ue,aa,Eh,ra,kh,$n,$h,Th,Ch,Ge,na,wh,sa,yh,Tn,Ah,Dh,as,ve,Je,Cn,oa,Lh,wn,Sh,rs,be,la,Ph,Ee,Oh,Ka,Ih,Mh,Qa,xh,Fh,ns;return M=new Pn({}),ot=new Pn({}),lt=new T({props:{name:"class 
transformers.integrations.CometCallback",anchor:"transformers.integrations.CometCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L660"}}),ct=new T({props:{name:"setup",anchor:"transformers.integrations.CometCallback.setup",parameters:[{name:"args",val:""},{name:"state",val:""},{name:"model",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L671"}}),mt=new T({props:{name:"class transformers.DefaultFlowCallback",anchor:"transformers.DefaultFlowCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L406"}}),pt=new T({props:{name:"class transformers.PrinterCallback",anchor:"transformers.PrinterCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L494"}}),_t=new T({props:{name:"class transformers.ProgressCallback",anchor:"transformers.ProgressCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L452"}}),vt=new T({props:{name:"class transformers.EarlyStoppingCallback",anchor:"transformers.EarlyStoppingCallback",parameters:[{name:"early_stopping_patience",val:": int = 1"},{name:"early_stopping_threshold",val:": typing.Optional[float] = 0.0"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L505",parametersDescription:[{anchor:"transformers.EarlyStoppingCallback.early_stopping_patience",description:`<strong>early_stopping_patience</strong> (<code>int</code>) &#x2014; Use with <code>metric_for_best_model</code> to stop training when the specified metric worsens for <code>early_stopping_patience</code> evaluation 
calls.`,name:"early_stopping_patience"},{anchor:"transformers.EarlyStoppingCallback.early_stopping_threshold(float,",description:`<strong>early_stopping_threshold(<code>float</code>,</strong> <em>optional</em>) &#x2014; Use with TrainingArguments <code>metric_for_best_model</code> and <code>early_stopping_patience</code> to denote how much the specified metric must improve to satisfy early stopping conditions. \``,name:"early_stopping_threshold(float,"}]}}),Et=new T({props:{name:"class transformers.integrations.TensorBoardCallback",anchor:"transformers.integrations.TensorBoardCallback",parameters:[{name:"tb_writer",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L446",parametersDescription:[{anchor:"transformers.integrations.TensorBoardCallback.tb_writer",description:`<strong>tb_writer</strong> (<code>SummaryWriter</code>, <em>optional</em>) &#x2014; The writer to use. Will instantiate one if not set.`,name:"tb_writer"}]}}),$t=new T({props:{name:"class transformers.integrations.WandbCallback",anchor:"transformers.integrations.WandbCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L534"}}),Ct=new T({props:{name:"setup",anchor:"transformers.integrations.WandbCallback.setup",parameters:[{name:"args",val:""},{name:"state",val:""},{name:"model",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L551"}}),Dt=new T({props:{name:"class transformers.integrations.MLflowCallback",anchor:"transformers.integrations.MLflowCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L759"}}),St=new 
T({props:{name:"setup",anchor:"transformers.integrations.MLflowCallback.setup",parameters:[{name:"args",val:""},{name:"state",val:""},{name:"model",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L776"}}),Pt=new T({props:{name:"class transformers.integrations.AzureMLCallback",anchor:"transformers.integrations.AzureMLCallback",parameters:[{name:"azureml_run",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L736"}}),It=new T({props:{name:"class transformers.integrations.CodeCarbonCallback",anchor:"transformers.integrations.CodeCarbonCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L919"}}),xt=new Pn({}),Ft=new T({props:{name:"class transformers.TrainerCallback",anchor:"transformers.TrainerCallback",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L159",parametersDescription:[{anchor:"transformers.TrainerCallback.args",description:`<strong>args</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>) &#x2014; The training arguments used to instantiate the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>.`,name:"args"},{anchor:"transformers.TrainerCallback.state",description:`<strong>state</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerState">TrainerState</a>) &#x2014; The current state of the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>.`,name:"state"},{anchor:"transformers.TrainerCallback.control",description:`<strong>control</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerControl">TrainerControl</a>) &#x2014; The object that is returned to the <a 
href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> and can be used to make some decisions.`,name:"control"},{anchor:"transformers.TrainerCallback.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <code>torch.nn.Module</code>) &#x2014; The model being trained.`,name:"model"},{anchor:"transformers.TrainerCallback.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer used for encoding the data.`,name:"tokenizer"},{anchor:"transformers.TrainerCallback.optimizer",description:`<strong>optimizer</strong> (<code>torch.optim.Optimizer</code>) &#x2014; The optimizer used for the training steps.`,name:"optimizer"},{anchor:"transformers.TrainerCallback.lr_scheduler",description:`<strong>lr_scheduler</strong> (<code>torch.optim.lr_scheduler.LambdaLR</code>) &#x2014; The scheduler used for setting the learning rate.`,name:"lr_scheduler"},{anchor:"transformers.TrainerCallback.train_dataloader",description:`<strong>train_dataloader</strong> (<code>torch.utils.data.DataLoader</code>, <em>optional</em>) &#x2014; The current dataloader used for training.`,name:"train_dataloader"},{anchor:"transformers.TrainerCallback.eval_dataloader",description:`<strong>eval_dataloader</strong> (<code>torch.utils.data.DataLoader</code>, <em>optional</em>) &#x2014; The current dataloader used for training.`,name:"eval_dataloader"},{anchor:"transformers.TrainerCallback.metrics",description:`<strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics computed by the last evaluation phase.</p> <p>Those are only accessible in the event <code>on_evaluate</code>.`,name:"metrics"},{anchor:"transformers.TrainerCallback.logs",description:`<strong>logs</strong> (<code>Dict[str, float]</code>) &#x2014; The 
values to log.</p> <p>Those are only accessible in the event <code>on_log</code>.`,name:"logs"}]}}),Nt=new Wh({props:{code:`class PrinterCallback(TrainerCallback): def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs)`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">PrinterCallback</span>(<span class="hljs-title class_ inherited__">TrainerCallback</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">on_log</span>(<span class="hljs-params">self, args, state, control, logs=<span class="hljs-literal">None</span>, **kwargs</span>): _ = logs.pop(<span class="hljs-string">&quot;total_flos&quot;</span>, <span class="hljs-literal">None</span>) <span class="hljs-keyword">if</span> state.is_local_process_zero: <span class="hljs-built_in">print</span>(logs)`}}),Wt=new T({props:{name:"on_epoch_begin",anchor:"transformers.TrainerCallback.on_epoch_begin",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L227"}}),zt=new T({props:{name:"on_epoch_end",anchor:"transformers.TrainerCallback.on_epoch_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L233"}}),Bt=new T({props:{name:"on_evaluate",anchor:"transformers.TrainerCallback.on_evaluate",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L259"}}),Vt=new 
T({props:{name:"on_init_end",anchor:"transformers.TrainerCallback.on_init_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L209"}}),Rt=new T({props:{name:"on_log",anchor:"transformers.TrainerCallback.on_log",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L271"}}),Ht=new T({props:{name:"on_prediction_step",anchor:"transformers.TrainerCallback.on_prediction_step",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L277"}}),Ut=new T({props:{name:"on_save",anchor:"transformers.TrainerCallback.on_save",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L265"}}),Gt=new T({props:{name:"on_step_begin",anchor:"transformers.TrainerCallback.on_step_begin",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L239"}}),Jt=new T({props:{name:"on_step_end",anchor:"transformers.TrainerCallback.on_step_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": 
TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L252"}}),Yt=new T({props:{name:"on_substep_end",anchor:"transformers.TrainerCallback.on_substep_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L246"}}),Xt=new T({props:{name:"on_train_begin",anchor:"transformers.TrainerCallback.on_train_begin",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L215"}}),Kt=new T({props:{name:"on_train_end",anchor:"transformers.TrainerCallback.on_train_end",parameters:[{name:"args",val:": TrainingArguments"},{name:"state",val:": TrainerState"},{name:"control",val:": TrainerControl"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L221"}}),Qt=new Wh({props:{code:`class MyCallback(TrainerCallback): "A callback that prints a message at the beginning of training" def on_train_begin(self, args, state, control, **kwargs): print("Starting training") trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=[MyCallback], # We can either pass the callback class this way or an instance of it (MyCallback()) )`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">MyCallback</span>(<span class="hljs-title class_ inherited__">TrainerCallback</span>): <span class="hljs-string">&quot;A callback that prints a message at the beginning of training&quot;</span> <span class="hljs-keyword">def</span> <span class="hljs-title 
function_">on_train_begin</span>(<span class="hljs-params">self, args, state, control, **kwargs</span>): <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Starting training&quot;</span>) trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=[MyCallback], <span class="hljs-comment"># We can either pass the callback class this way or an instance of it (MyCallback())</span> )`}}),Zt=new Wh({props:{code:`trainer = Trainer(...) trainer.add_callback(MyCallback) # Alternatively, we can pass an instance of the callback class trainer.add_callback(MyCallback())`,highlighted:`trainer = Trainer(...) trainer.add_callback(MyCallback) <span class="hljs-comment"># Alternatively, we can pass an instance of the callback class</span> trainer.add_callback(MyCallback())`}}),ea=new Pn({}),ta=new T({props:{name:"class transformers.TrainerState",anchor:"transformers.TrainerState",parameters:[{name:"epoch",val:": typing.Optional[float] = None"},{name:"global_step",val:": int = 0"},{name:"max_steps",val:": int = 0"},{name:"num_train_epochs",val:": int = 0"},{name:"total_flos",val:": float = 0"},{name:"log_history",val:": typing.List[typing.Dict[str, float]] = None"},{name:"best_metric",val:": typing.Optional[float] = None"},{name:"best_model_checkpoint",val:": typing.Optional[str] = None"},{name:"is_local_process_zero",val:": bool = True"},{name:"is_world_process_zero",val:": bool = True"},{name:"is_hyper_param_search",val:": bool = False"},{name:"trial_name",val:": str = None"},{name:"trial_params",val:": typing.Dict[str, typing.Union[str, float, int, bool]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L35",parametersDescription:[{anchor:"transformers.TrainerState.epoch",description:`<strong>epoch</strong> (<code>float</code>, <em>optional</em>) &#x2014; Only set during training, will represent the epoch the training is at (the decimal part being the 
percentage of the current epoch completed).`,name:"epoch"},{anchor:"transformers.TrainerState.global_step",description:`<strong>global_step</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; During training, represents the number of update steps completed.`,name:"global_step"},{anchor:"transformers.TrainerState.max_steps",description:`<strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The number of update steps to do during the current training.`,name:"max_steps"},{anchor:"transformers.TrainerState.total_flos",description:`<strong>total_flos</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow).`,name:"total_flos"},{anchor:"transformers.TrainerState.log_history",description:`<strong>log_history</strong> (<code>List[Dict[str, float]]</code>, <em>optional</em>) &#x2014; The list of logs done since the beginning of training.`,name:"log_history"},{anchor:"transformers.TrainerState.best_metric",description:`<strong>best_metric</strong> (<code>float</code>, <em>optional</em>) &#x2014; When tracking the best model, the value of the best metric encountered so far.`,name:"best_metric"},{anchor:"transformers.TrainerState.best_model_checkpoint",description:`<strong>best_model_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; When tracking the best model, the value of the name of the checkpoint for the best model encountered so far.`,name:"best_model_checkpoint"},{anchor:"transformers.TrainerState.is_local_process_zero",description:`<strong>is_local_process_zero</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main 
process.`,name:"is_local_process_zero"},{anchor:"transformers.TrainerState.is_world_process_zero",description:`<strong>is_world_process_zero</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be <code>True</code> for one process).`,name:"is_world_process_zero"},{anchor:"transformers.TrainerState.is_hyper_param_search",description:`<strong>is_hyper_param_search</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search. This will impact the way data will be logged in TensorBoard.`,name:"is_hyper_param_search"}]}}),He=new ip({props:{$$slots:{default:[cp]},$$scope:{ctx:hr}}}),aa=new T({props:{name:"load_from_json",anchor:"transformers.TrainerState.load_from_json",parameters:[{name:"json_path",val:": str"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L101"}}),na=new T({props:{name:"save_to_json",anchor:"transformers.TrainerState.save_to_json",parameters:[{name:"json_path",val:": str"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L95"}}),oa=new Pn({}),la=new T({props:{name:"class transformers.TrainerControl",anchor:"transformers.TrainerControl",parameters:[{name:"should_training_stop",val:": bool = False"},{name:"should_epoch_stop",val:": bool = False"},{name:"should_save",val:": bool = False"},{name:"should_evaluate",val:": bool = False"},{name:"should_log",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L110",parametersDescription:[{anchor:"transformers.TrainerControl.should_training_stop",description:`<strong>should_training_stop</strong> (<code>bool</code>, <em>optional</em>, 
defaults to <code>False</code>) &#x2014; Whether or not the training should be interrupted.</p> <p>If <code>True</code>, this variable will not be set back to <code>False</code>. The training will just stop.`,name:"should_training_stop"},{anchor:"transformers.TrainerControl.should_epoch_stop",description:`<strong>should_epoch_stop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the current epoch should be interrupted.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next epoch.`,name:"should_epoch_stop"},{anchor:"transformers.TrainerControl.should_save",description:`<strong>should_save</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be saved at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.`,name:"should_save"},{anchor:"transformers.TrainerControl.should_evaluate",description:`<strong>should_evaluate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be evaluated at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.`,name:"should_evaluate"},{anchor:"transformers.TrainerControl.should_log",description:`<strong>should_log</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the logs should be reported at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.`,name:"should_log"}]}}),{c(){w=s("meta"),Y=c(),D=s("h1"),P=s("a"),Z=s("span"),_(M.$$.fragment),et=c(),ee=s("span"),te=r("Callbacks"),x=c(),q=s("p"),fa=r(`Callbacks are objects that can customize the behavior of the training loop in the PyTorch `),ha=s("a"),Gs=r("Trainer"),Js=r(` (this 
feature is not yet implemented in TensorFlow) that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms\u2026) and take decisions (like early stopping).`),On=c(),j=s("p"),Ys=r("Callbacks are \u201Cread only\u201D pieces of code, apart from the "),ma=s("a"),Xs=r("TrainerControl"),Ks=r(` object they return, they cannot change anything in the training loop. For customizations that require changes in the training loop, you should subclass `),da=s("a"),Qs=r("Trainer"),Zs=r(" and override the methods you need (see "),pa=s("a"),eo=r("trainer"),to=r(" for examples)."),In=c(),ke=s("p"),ao=r("By default a "),ga=s("a"),ro=r("Trainer"),no=r(" will use the following callbacks:"),Mn=c(),y=s("ul"),_a=s("li"),ua=s("a"),so=r("DefaultFlowCallback"),oo=r(" which handles the default behavior for logging, saving and evaluation."),lo=c(),X=s("li"),va=s("a"),io=r("PrinterCallback"),co=r(" or "),ba=s("a"),fo=r("ProgressCallback"),ho=r(` to display progress and print the logs (the first one is used if you deactivate tqdm through the `),Ea=s("a"),mo=r("TrainingArguments"),po=r(`, otherwise it\u2019s the second one).`),go=c(),ka=s("li"),$a=s("a"),_o=r("TensorBoardCallback"),uo=r(` if tensorboard is accessible (either through PyTorch >= 1.4 or tensorboardX).`),vo=c(),$e=s("li"),Ta=s("a"),bo=r("WandbCallback"),Eo=r(" if "),tt=s("a"),ko=r("wandb"),$o=r(" is installed."),To=c(),Te=s("li"),Ca=s("a"),Co=r("CometCallback"),wo=r(" if "),at=s("a"),yo=r("comet_ml"),Ao=r(" is installed."),Do=c(),Ce=s("li"),wa=s("a"),Lo=r("MLflowCallback"),So=r(" if "),rt=s("a"),Po=r("mlflow"),Oo=r(" is installed."),Io=c(),we=s("li"),ya=s("a"),Mo=r("AzureMLCallback"),xo=r(" if "),nt=s("a"),Fo=r("azureml-sdk"),jo=r(` is installed.`),No=c(),ye=s("li"),Aa=s("a"),Wo=r("CodeCarbonCallback"),zo=r(" if "),st=s("a"),Bo=r("codecarbon"),Vo=r(` is installed.`),xn=c(),L=s("p"),qo=r("The main class that implements callbacks is "),Da=s("a"),Ro=r("TrainerCallback"),Ho=r(`. 
It gets the `),La=s("a"),Uo=r("TrainingArguments"),Go=r(" used to instantiate the "),Sa=s("a"),Jo=r("Trainer"),Yo=r(`, can access that Trainer\u2019s internal state via `),Pa=s("a"),Xo=r("TrainerState"),Ko=r(`, and can take some actions on the training loop via `),Oa=s("a"),Qo=r("TrainerControl"),Zo=r("."),Fn=c(),ae=s("h2"),Ae=s("a"),mr=s("span"),_(ot.$$.fragment),el=c(),dr=s("span"),tl=r("Available Callbacks"),jn=c(),De=s("p"),al=r("Here is the list of the available "),Ia=s("a"),rl=r("TrainerCallback"),nl=r(" in the library:"),Nn=c(),R=s("div"),_(lt.$$.fragment),sl=c(),re=s("p"),ol=r("A "),Ma=s("a"),ll=r("TrainerCallback"),il=r(" that sends the logs to "),it=s("a"),cl=r("Comet ML"),fl=r("."),hl=c(),N=s("div"),_(ct.$$.fragment),ml=c(),pr=s("p"),dl=r("Setup the optional Comet.ml integration."),pl=c(),C=s("p"),gl=r(`Environment: COMET_MODE (`),gr=s("code"),_l=r("str"),ul=r(", "),_r=s("em"),vl=r("optional"),bl=r(`): Whether to create an online, offline experiment or disable Comet logging. Can be \u201COFFLINE\u201D, \u201CONLINE\u201D, or \u201CDISABLED\u201D. Defaults to \u201CONLINE\u201D. COMET_PROJECT_NAME (`),ur=s("code"),El=r("str"),kl=r(", "),vr=s("em"),$l=r("optional"),Tl=r(`): Comet project name for experiments COMET_OFFLINE_DIRECTORY (`),br=s("code"),Cl=r("str"),wl=r(", "),Er=s("em"),yl=r("optional"),Al=r(`): Folder to use for saving offline experiments when `),kr=s("code"),Dl=r("COMET_MODE"),Ll=r(` is \u201COFFLINE\u201D COMET_LOG_ASSETS (`),$r=s("code"),Sl=r("str"),Pl=r(", "),Tr=s("em"),Ol=r("optional"),Il=r(`): Whether or not to log training assets (tf event logs, checkpoints, etc), to Comet. Can be \u201CTRUE\u201D, or \u201CFALSE\u201D. 
Defaults to \u201CTRUE\u201D.`),Ml=c(),ft=s("p"),xl=r(`For a number of configurable items in the environment, see `),ht=s("a"),Fl=r("here"),jl=r("."),Wn=c(),ne=s("div"),_(mt.$$.fragment),Nl=c(),dt=s("p"),Wl=r("A "),xa=s("a"),zl=r("TrainerCallback"),Bl=r(" that handles the default flow of the training loop for logs, evaluation and checkpoints."),zn=c(),se=s("div"),_(pt.$$.fragment),Vl=c(),gt=s("p"),ql=r("A bare "),Fa=s("a"),Rl=r("TrainerCallback"),Hl=r(" that just prints the logs."),Bn=c(),oe=s("div"),_(_t.$$.fragment),Ul=c(),ut=s("p"),Gl=r("A "),ja=s("a"),Jl=r("TrainerCallback"),Yl=r(" that displays the progress of training or evaluation."),Vn=c(),H=s("div"),_(vt.$$.fragment),Xl=c(),bt=s("p"),Kl=r("A "),Na=s("a"),Ql=r("TrainerCallback"),Zl=r(" that handles early stopping."),ei=c(),U=s("p"),ti=r("This callback depends on "),Wa=s("a"),ai=r("TrainingArguments"),ri=r(" argument "),Cr=s("em"),ni=r("load_best_model_at_end"),si=r(` functionality to set best_metric in `),za=s("a"),oi=r("TrainerState"),li=r("."),qn=c(),le=s("div"),_(Et.$$.fragment),ii=c(),ie=s("p"),ci=r("A "),Ba=s("a"),fi=r("TrainerCallback"),hi=r(" that sends the logs to "),kt=s("a"),mi=r("TensorBoard"),di=r("."),Rn=c(),G=s("div"),_($t.$$.fragment),pi=c(),ce=s("p"),gi=r("A "),Va=s("a"),_i=r("TrainerCallback"),ui=r(" that sends the logs to "),Tt=s("a"),vi=r("Weight and Biases"),bi=r("."),Ei=c(),W=s("div"),_(Ct.$$.fragment),ki=c(),wt=s("p"),$i=r("Setup the optional Weights & Biases ("),wr=s("em"),Ti=r("wandb"),Ci=r(") integration."),wi=c(),yt=s("p"),yi=r(`One can subclass and override this method to customize the setup if needed. Find more information `),At=s("a"),Ai=r("here"),Di=r(`. You can also override the following environment variables:`),Li=c(),d=s("p"),Si=r(`Environment: WANDB_LOG_MODEL (`),yr=s("code"),Pi=r("bool"),Oi=r(", "),Ar=s("em"),Ii=r("optional"),Mi=r(", defaults to "),Dr=s("code"),xi=r("False"),Fi=r(`): Whether or not to log model as artifact at the end of training. 
Use along with `),Lr=s("em"),ji=r("TrainingArguments.load_best_model_at_end"),Ni=r(` to upload best model. WANDB_WATCH (`),Sr=s("code"),Wi=r("str"),zi=r(", "),Pr=s("em"),Bi=r("optional"),Vi=r(" defaults to "),Or=s("code"),qi=r('"gradients"'),Ri=r(`): Can be `),Ir=s("code"),Hi=r('"gradients"'),Ui=r(", "),Mr=s("code"),Gi=r('"all"'),Ji=r(" or "),xr=s("code"),Yi=r('"false"'),Xi=r(". Set to "),Fr=s("code"),Ki=r('"false"'),Qi=r(" to disable gradient logging or "),jr=s("code"),Zi=r('"all"'),ec=r(` to log gradients and parameters. WANDB_PROJECT (`),Nr=s("code"),tc=r("str"),ac=r(", "),Wr=s("em"),rc=r("optional"),nc=r(", defaults to "),zr=s("code"),sc=r('"huggingface"'),oc=r(`): Set this to a custom string to store results in a different project. WANDB_DISABLED (`),Br=s("code"),lc=r("bool"),ic=r(", "),Vr=s("em"),cc=r("optional"),fc=r(", defaults to "),qr=s("code"),hc=r("False"),mc=r(`): Whether or not to disable wandb entirely. Set `),Rr=s("em"),dc=r("WANDB_DISABLED=true"),pc=r(" to disable."),Hn=c(),J=s("div"),_(Dt.$$.fragment),gc=c(),fe=s("p"),_c=r("A "),qa=s("a"),uc=r("TrainerCallback"),vc=r(" that sends the logs to "),Lt=s("a"),bc=r("MLflow"),Ec=r("."),kc=c(),z=s("div"),_(St.$$.fragment),$c=c(),Hr=s("p"),Tc=r("Setup the optional MLflow integration."),Cc=c(),he=s("p"),wc=r(`Environment: HF_MLFLOW_LOG_ARTIFACTS (`),Ur=s("code"),yc=r("str"),Ac=r(", "),Gr=s("em"),Dc=r("optional"),Lc=r(`): Whether to use MLflow .log_artifact() facility to log artifacts.`),Sc=c(),F=s("p"),Pc=r("This only makes sense if logging to a remote server, e.g. s3 or GCS. If set to "),Jr=s("code"),Oc=r("True"),Ic=r(" or "),Yr=s("em"),Mc=r("1"),xc=r(`, will copy whatever is in `),Ra=s("a"),Fc=r("TrainingArguments"),jc=r("\u2019s "),Xr=s("code"),Nc=r("output_dir"),Wc=r(` to the local or remote artifact storage. 
Using it without a remote storage will just copy the files to your artifact location.`),Un=c(),me=s("div"),_(Pt.$$.fragment),zc=c(),de=s("p"),Bc=r("A "),Ha=s("a"),Vc=r("TrainerCallback"),qc=r(" that sends the logs to "),Ot=s("a"),Rc=r("AzureML"),Hc=r("."),Gn=c(),pe=s("div"),_(It.$$.fragment),Uc=c(),Mt=s("p"),Gc=r("A "),Ua=s("a"),Jc=r("TrainerCallback"),Yc=r(" that tracks the CO2 emission of training."),Jn=c(),ge=s("h2"),Le=s("a"),Kr=s("span"),_(xt.$$.fragment),Xc=c(),Qr=s("span"),Kc=r("TrainerCallback"),Yn=c(),p=s("div"),_(Ft.$$.fragment),Qc=c(),Zr=s("p"),Zc=r(`A class for objects that will inspect the state of the training loop at some events and take some decisions. At each of those events the following arguments are available:`),ef=c(),jt=s("p"),tf=r("The "),en=s("code"),af=r("control"),rf=r(` object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version.`),nf=c(),O=s("p"),sf=r("The argument "),tn=s("code"),of=r("args"),lf=r(", "),an=s("code"),cf=r("state"),ff=r(" and "),rn=s("code"),hf=r("control"),mf=r(" are positionals for all events, all the others are grouped in "),nn=s("code"),df=r("kwargs"),pf=r(`. You can unpack the ones you need in the signature of the event using them. 
As an example, see the code of the simple `),sn=s("code"),gf=r("PrinterCallback"),_f=r("."),uf=c(),on=s("p"),vf=r("Example:"),bf=c(),_(Nt.$$.fragment),Ef=c(),Se=s("div"),_(Wt.$$.fragment),kf=c(),ln=s("p"),$f=r("Event called at the beginning of an epoch."),Tf=c(),Pe=s("div"),_(zt.$$.fragment),Cf=c(),cn=s("p"),wf=r("Event called at the end of an epoch."),yf=c(),Oe=s("div"),_(Bt.$$.fragment),Af=c(),fn=s("p"),Df=r("Event called after an evaluation phase."),Lf=c(),Ie=s("div"),_(Vt.$$.fragment),Sf=c(),qt=s("p"),Pf=r("Event called at the end of the initialization of the "),Ga=s("a"),Of=r("Trainer"),If=r("."),Mf=c(),Me=s("div"),_(Rt.$$.fragment),xf=c(),hn=s("p"),Ff=r("Event called after logging the last logs."),jf=c(),xe=s("div"),_(Ht.$$.fragment),Nf=c(),mn=s("p"),Wf=r("Event called after a prediction step."),zf=c(),Fe=s("div"),_(Ut.$$.fragment),Bf=c(),dn=s("p"),Vf=r("Event called after a checkpoint save."),qf=c(),je=s("div"),_(Gt.$$.fragment),Rf=c(),pn=s("p"),Hf=r(`Event called at the beginning of a training step. If using gradient accumulation, one training step might take several inputs.`),Uf=c(),Ne=s("div"),_(Jt.$$.fragment),Gf=c(),gn=s("p"),Jf=r(`Event called at the end of a training step. 
If using gradient accumulation, one training step might take several inputs.`),Yf=c(),We=s("div"),_(Yt.$$.fragment),Xf=c(),_n=s("p"),Kf=r("Event called at the end of an substep during gradient accumulation."),Qf=c(),ze=s("div"),_(Xt.$$.fragment),Zf=c(),un=s("p"),eh=r("Event called at the beginning of training."),th=c(),Be=s("div"),_(Kt.$$.fragment),ah=c(),vn=s("p"),rh=r("Event called at the end of training."),Xn=c(),Ve=s("p"),nh=r("Here is an example of how to register a custom callback with the PyTorch "),Ja=s("a"),sh=r("Trainer"),oh=r(":"),Kn=c(),_(Qt.$$.fragment),Qn=c(),qe=s("p"),lh=r("Another way to register a callback is to call "),bn=s("code"),ih=r("trainer.add_callback()"),ch=r(" as follows:"),Zn=c(),_(Zt.$$.fragment),es=c(),_e=s("h2"),Re=s("a"),En=s("span"),_(ea.$$.fragment),fh=c(),kn=s("span"),hh=r("TrainerState"),ts=c(),I=s("div"),_(ta.$$.fragment),mh=c(),ue=s("p"),dh=r("A class containing the "),Ya=s("a"),ph=r("Trainer"),gh=r(` inner state that will be saved along the model and optimizer when checkpointing and passed to the `),Xa=s("a"),_h=r("TrainerCallback"),uh=r("."),vh=c(),_(He.$$.fragment),bh=c(),Ue=s("div"),_(aa.$$.fragment),Eh=c(),ra=s("p"),kh=r("Create an instance from the content of "),$n=s("code"),$h=r("json_path"),Th=r("."),Ch=c(),Ge=s("div"),_(na.$$.fragment),wh=c(),sa=s("p"),yh=r("Save the content of this instance in JSON format inside "),Tn=s("code"),Ah=r("json_path"),Dh=r("."),as=c(),ve=s("h2"),Je=s("a"),Cn=s("span"),_(oa.$$.fragment),Lh=c(),wn=s("span"),Sh=r("TrainerControl"),rs=c(),be=s("div"),_(la.$$.fragment),Ph=c(),Ee=s("p"),Oh=r("A class that handles the "),Ka=s("a"),Ih=r("Trainer"),Mh=r(" control flow. 
This class is used by the "),Qa=s("a"),xh=r("TrainerCallback"),Fh=r(` to activate some switches in the training loop.`),this.h()},l(a){const h=lp('[data-svelte="svelte-1phssyn"]',document.head);w=o(h,"META",{name:!0,content:!0}),h.forEach(t),Y=f(a),D=o(a,"H1",{class:!0});var ia=l(D);P=o(ia,"A",{id:!0,class:!0,href:!0});var zh=l(P);Z=o(zh,"SPAN",{});var Bh=l(Z);u(M.$$.fragment,Bh),Bh.forEach(t),zh.forEach(t),et=f(ia),ee=o(ia,"SPAN",{});var Vh=l(ee);te=n(Vh,"Callbacks"),Vh.forEach(t),ia.forEach(t),x=f(a),q=o(a,"P",{});var ss=l(q);fa=n(ss,`Callbacks are objects that can customize the behavior of the training loop in the PyTorch `),ha=o(ss,"A",{href:!0});var qh=l(ha);Gs=n(qh,"Trainer"),qh.forEach(t),Js=n(ss,` (this feature is not yet implemented in TensorFlow) that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms\u2026) and take decisions (like early stopping).`),ss.forEach(t),On=f(a),j=o(a,"P",{});var Ye=l(j);Ys=n(Ye,"Callbacks are \u201Cread only\u201D pieces of code, apart from the "),ma=o(Ye,"A",{href:!0});var Rh=l(ma);Xs=n(Rh,"TrainerControl"),Rh.forEach(t),Ks=n(Ye,` object they return, they cannot change anything in the training loop. 
For customizations that require changes in the training loop, you should subclass `),da=o(Ye,"A",{href:!0});var Hh=l(da);Qs=n(Hh,"Trainer"),Hh.forEach(t),Zs=n(Ye," and override the methods you need (see "),pa=o(Ye,"A",{href:!0});var Uh=l(pa);eo=n(Uh,"trainer"),Uh.forEach(t),to=n(Ye," for examples)."),Ye.forEach(t),In=f(a),ke=o(a,"P",{});var os=l(ke);ao=n(os,"By default a "),ga=o(os,"A",{href:!0});var Gh=l(ga);ro=n(Gh,"Trainer"),Gh.forEach(t),no=n(os," will use the following callbacks:"),os.forEach(t),Mn=f(a),y=o(a,"UL",{});var S=l(y);_a=o(S,"LI",{});var jh=l(_a);ua=o(jh,"A",{href:!0});var Jh=l(ua);so=n(Jh,"DefaultFlowCallback"),Jh.forEach(t),oo=n(jh," which handles the default behavior for logging, saving and evaluation."),jh.forEach(t),lo=f(S),X=o(S,"LI",{});var ca=l(X);va=o(ca,"A",{href:!0});var Yh=l(va);io=n(Yh,"PrinterCallback"),Yh.forEach(t),co=n(ca," or "),ba=o(ca,"A",{href:!0});var Xh=l(ba);fo=n(Xh,"ProgressCallback"),Xh.forEach(t),ho=n(ca,` to display progress and print the logs (the first one is used if you deactivate tqdm through the `),Ea=o(ca,"A",{href:!0});var Kh=l(Ea);mo=n(Kh,"TrainingArguments"),Kh.forEach(t),po=n(ca,`, otherwise it\u2019s the second one).`),ca.forEach(t),go=f(S),ka=o(S,"LI",{});var Nh=l(ka);$a=o(Nh,"A",{href:!0});var Qh=l($a);_o=n(Qh,"TensorBoardCallback"),Qh.forEach(t),uo=n(Nh,` if tensorboard is accessible (either through PyTorch >= 1.4 or tensorboardX).`),Nh.forEach(t),vo=f(S),$e=o(S,"LI",{});var yn=l($e);Ta=o(yn,"A",{href:!0});var Zh=l(Ta);bo=n(Zh,"WandbCallback"),Zh.forEach(t),Eo=n(yn," if "),tt=o(yn,"A",{href:!0,rel:!0});var em=l(tt);ko=n(em,"wandb"),em.forEach(t),$o=n(yn," is installed."),yn.forEach(t),To=f(S),Te=o(S,"LI",{});var An=l(Te);Ca=o(An,"A",{href:!0});var tm=l(Ca);Co=n(tm,"CometCallback"),tm.forEach(t),wo=n(An," if "),at=o(An,"A",{href:!0,rel:!0});var am=l(at);yo=n(am,"comet_ml"),am.forEach(t),Ao=n(An," is installed."),An.forEach(t),Do=f(S),Ce=o(S,"LI",{});var Dn=l(Ce);wa=o(Dn,"A",{href:!0});var 
rm=l(wa);Lo=n(rm,"MLflowCallback"),rm.forEach(t),So=n(Dn," if "),rt=o(Dn,"A",{href:!0,rel:!0});var nm=l(rt);Po=n(nm,"mlflow"),nm.forEach(t),Oo=n(Dn," is installed."),Dn.forEach(t),Io=f(S),we=o(S,"LI",{});var Ln=l(we);ya=o(Ln,"A",{href:!0});var sm=l(ya);Mo=n(sm,"AzureMLCallback"),sm.forEach(t),xo=n(Ln," if "),nt=o(Ln,"A",{href:!0,rel:!0});var om=l(nt);Fo=n(om,"azureml-sdk"),om.forEach(t),jo=n(Ln,` is installed.`),Ln.forEach(t),No=f(S),ye=o(S,"LI",{});var Sn=l(ye);Aa=o(Sn,"A",{href:!0});var lm=l(Aa);Wo=n(lm,"CodeCarbonCallback"),lm.forEach(t),zo=n(Sn," if "),st=o(Sn,"A",{href:!0,rel:!0});var im=l(st);Bo=n(im,"codecarbon"),im.forEach(t),Vo=n(Sn,` is installed.`),Sn.forEach(t),S.forEach(t),xn=f(a),L=o(a,"P",{});var B=l(L);qo=n(B,"The main class that implements callbacks is "),Da=o(B,"A",{href:!0});var cm=l(Da);Ro=n(cm,"TrainerCallback"),cm.forEach(t),Ho=n(B,`. It gets the `),La=o(B,"A",{href:!0});var fm=l(La);Uo=n(fm,"TrainingArguments"),fm.forEach(t),Go=n(B," used to instantiate the "),Sa=o(B,"A",{href:!0});var hm=l(Sa);Jo=n(hm,"Trainer"),hm.forEach(t),Yo=n(B,`, can access that Trainer\u2019s internal state via `),Pa=o(B,"A",{href:!0});var mm=l(Pa);Xo=n(mm,"TrainerState"),mm.forEach(t),Ko=n(B,`, and can take some actions on the training loop via `),Oa=o(B,"A",{href:!0});var dm=l(Oa);Qo=n(dm,"TrainerControl"),dm.forEach(t),Zo=n(B,"."),B.forEach(t),Fn=f(a),ae=o(a,"H2",{class:!0});var ls=l(ae);Ae=o(ls,"A",{id:!0,class:!0,href:!0});var pm=l(Ae);mr=o(pm,"SPAN",{});var gm=l(mr);u(ot.$$.fragment,gm),gm.forEach(t),pm.forEach(t),el=f(ls),dr=o(ls,"SPAN",{});var _m=l(dr);tl=n(_m,"Available Callbacks"),_m.forEach(t),ls.forEach(t),jn=f(a),De=o(a,"P",{});var is=l(De);al=n(is,"Here is the list of the available "),Ia=o(is,"A",{href:!0});var um=l(Ia);rl=n(um,"TrainerCallback"),um.forEach(t),nl=n(is," in the library:"),is.forEach(t),Nn=f(a),R=o(a,"DIV",{class:!0});var Za=l(R);u(lt.$$.fragment,Za),sl=f(Za),re=o(Za,"P",{});var er=l(re);ol=n(er,"A "),Ma=o(er,"A",{href:!0});var 
vm=l(Ma);ll=n(vm,"TrainerCallback"),vm.forEach(t),il=n(er," that sends the logs to "),it=o(er,"A",{href:!0,rel:!0});var bm=l(it);cl=n(bm,"Comet ML"),bm.forEach(t),fl=n(er,"."),er.forEach(t),hl=f(Za),N=o(Za,"DIV",{class:!0});var Xe=l(N);u(ct.$$.fragment,Xe),ml=f(Xe),pr=o(Xe,"P",{});var Em=l(pr);dl=n(Em,"Setup the optional Comet.ml integration."),Em.forEach(t),pl=f(Xe),C=o(Xe,"P",{});var A=l(C);gl=n(A,`Environment: COMET_MODE (`),gr=o(A,"CODE",{});var km=l(gr);_l=n(km,"str"),km.forEach(t),ul=n(A,", "),_r=o(A,"EM",{});var $m=l(_r);vl=n($m,"optional"),$m.forEach(t),bl=n(A,`): Whether to create an online, offline experiment or disable Comet logging. Can be \u201COFFLINE\u201D, \u201CONLINE\u201D, or \u201CDISABLED\u201D. Defaults to \u201CONLINE\u201D. COMET_PROJECT_NAME (`),ur=o(A,"CODE",{});var Tm=l(ur);El=n(Tm,"str"),Tm.forEach(t),kl=n(A,", "),vr=o(A,"EM",{});var Cm=l(vr);$l=n(Cm,"optional"),Cm.forEach(t),Tl=n(A,`): Comet project name for experiments COMET_OFFLINE_DIRECTORY (`),br=o(A,"CODE",{});var wm=l(br);Cl=n(wm,"str"),wm.forEach(t),wl=n(A,", "),Er=o(A,"EM",{});var ym=l(Er);yl=n(ym,"optional"),ym.forEach(t),Al=n(A,`): Folder to use for saving offline experiments when `),kr=o(A,"CODE",{});var Am=l(kr);Dl=n(Am,"COMET_MODE"),Am.forEach(t),Ll=n(A,` is \u201COFFLINE\u201D COMET_LOG_ASSETS (`),$r=o(A,"CODE",{});var Dm=l($r);Sl=n(Dm,"str"),Dm.forEach(t),Pl=n(A,", "),Tr=o(A,"EM",{});var Lm=l(Tr);Ol=n(Lm,"optional"),Lm.forEach(t),Il=n(A,`): Whether or not to log training assets (tf event logs, checkpoints, etc), to Comet. Can be \u201CTRUE\u201D, or \u201CFALSE\u201D. 
Defaults to \u201CTRUE\u201D.`),A.forEach(t),Ml=f(Xe),ft=o(Xe,"P",{});var cs=l(ft);xl=n(cs,`For a number of configurable items in the environment, see `),ht=o(cs,"A",{href:!0,rel:!0});var Sm=l(ht);Fl=n(Sm,"here"),Sm.forEach(t),jl=n(cs,"."),cs.forEach(t),Xe.forEach(t),Za.forEach(t),Wn=f(a),ne=o(a,"DIV",{class:!0});var fs=l(ne);u(mt.$$.fragment,fs),Nl=f(fs),dt=o(fs,"P",{});var hs=l(dt);Wl=n(hs,"A "),xa=o(hs,"A",{href:!0});var Pm=l(xa);zl=n(Pm,"TrainerCallback"),Pm.forEach(t),Bl=n(hs," that handles the default flow of the training loop for logs, evaluation and checkpoints."),hs.forEach(t),fs.forEach(t),zn=f(a),se=o(a,"DIV",{class:!0});var ms=l(se);u(pt.$$.fragment,ms),Vl=f(ms),gt=o(ms,"P",{});var ds=l(gt);ql=n(ds,"A bare "),Fa=o(ds,"A",{href:!0});var Om=l(Fa);Rl=n(Om,"TrainerCallback"),Om.forEach(t),Hl=n(ds," that just prints the logs."),ds.forEach(t),ms.forEach(t),Bn=f(a),oe=o(a,"DIV",{class:!0});var ps=l(oe);u(_t.$$.fragment,ps),Ul=f(ps),ut=o(ps,"P",{});var gs=l(ut);Gl=n(gs,"A "),ja=o(gs,"A",{href:!0});var Im=l(ja);Jl=n(Im,"TrainerCallback"),Im.forEach(t),Yl=n(gs," that displays the progress of training or evaluation."),gs.forEach(t),ps.forEach(t),Vn=f(a),H=o(a,"DIV",{class:!0});var tr=l(H);u(vt.$$.fragment,tr),Xl=f(tr),bt=o(tr,"P",{});var _s=l(bt);Kl=n(_s,"A "),Na=o(_s,"A",{href:!0});var Mm=l(Na);Ql=n(Mm,"TrainerCallback"),Mm.forEach(t),Zl=n(_s," that handles early stopping."),_s.forEach(t),ei=f(tr),U=o(tr,"P",{});var Ke=l(U);ti=n(Ke,"This callback depends on "),Wa=o(Ke,"A",{href:!0});var xm=l(Wa);ai=n(xm,"TrainingArguments"),xm.forEach(t),ri=n(Ke," argument "),Cr=o(Ke,"EM",{});var Fm=l(Cr);ni=n(Fm,"load_best_model_at_end"),Fm.forEach(t),si=n(Ke,` functionality to set best_metric in `),za=o(Ke,"A",{href:!0});var jm=l(za);oi=n(jm,"TrainerState"),jm.forEach(t),li=n(Ke,"."),Ke.forEach(t),tr.forEach(t),qn=f(a),le=o(a,"DIV",{class:!0});var us=l(le);u(Et.$$.fragment,us),ii=f(us),ie=o(us,"P",{});var ar=l(ie);ci=n(ar,"A "),Ba=o(ar,"A",{href:!0});var 
Nm=l(Ba);fi=n(Nm,"TrainerCallback"),Nm.forEach(t),hi=n(ar," that sends the logs to "),kt=o(ar,"A",{href:!0,rel:!0});var Wm=l(kt);mi=n(Wm,"TensorBoard"),Wm.forEach(t),di=n(ar,"."),ar.forEach(t),us.forEach(t),Rn=f(a),G=o(a,"DIV",{class:!0});var rr=l(G);u($t.$$.fragment,rr),pi=f(rr),ce=o(rr,"P",{});var nr=l(ce);gi=n(nr,"A "),Va=o(nr,"A",{href:!0});var zm=l(Va);_i=n(zm,"TrainerCallback"),zm.forEach(t),ui=n(nr," that sends the logs to "),Tt=o(nr,"A",{href:!0,rel:!0});var Bm=l(Tt);vi=n(Bm,"Weight and Biases"),Bm.forEach(t),bi=n(nr,"."),nr.forEach(t),Ei=f(rr),W=o(rr,"DIV",{class:!0});var Qe=l(W);u(Ct.$$.fragment,Qe),ki=f(Qe),wt=o(Qe,"P",{});var vs=l(wt);$i=n(vs,"Setup the optional Weights & Biases ("),wr=o(vs,"EM",{});var Vm=l(wr);Ti=n(Vm,"wandb"),Vm.forEach(t),Ci=n(vs,") integration."),vs.forEach(t),wi=f(Qe),yt=o(Qe,"P",{});var bs=l(yt);yi=n(bs,`One can subclass and override this method to customize the setup if needed. Find more information `),At=o(bs,"A",{href:!0,rel:!0});var qm=l(At);Ai=n(qm,"here"),qm.forEach(t),Di=n(bs,`. You can also override the following environment variables:`),bs.forEach(t),Li=f(Qe),d=o(Qe,"P",{});var g=l(d);Si=n(g,`Environment: WANDB_LOG_MODEL (`),yr=o(g,"CODE",{});var Rm=l(yr);Pi=n(Rm,"bool"),Rm.forEach(t),Oi=n(g,", "),Ar=o(g,"EM",{});var Hm=l(Ar);Ii=n(Hm,"optional"),Hm.forEach(t),Mi=n(g,", defaults to "),Dr=o(g,"CODE",{});var Um=l(Dr);xi=n(Um,"False"),Um.forEach(t),Fi=n(g,`): Whether or not to log model as artifact at the end of training. Use along with `),Lr=o(g,"EM",{});var Gm=l(Lr);ji=n(Gm,"TrainingArguments.load_best_model_at_end"),Gm.forEach(t),Ni=n(g,` to upload best model. 
WANDB_WATCH (`),Sr=o(g,"CODE",{});var Jm=l(Sr);Wi=n(Jm,"str"),Jm.forEach(t),zi=n(g,", "),Pr=o(g,"EM",{});var Ym=l(Pr);Bi=n(Ym,"optional"),Ym.forEach(t),Vi=n(g," defaults to "),Or=o(g,"CODE",{});var Xm=l(Or);qi=n(Xm,'"gradients"'),Xm.forEach(t),Ri=n(g,`): Can be `),Ir=o(g,"CODE",{});var Km=l(Ir);Hi=n(Km,'"gradients"'),Km.forEach(t),Ui=n(g,", "),Mr=o(g,"CODE",{});var Qm=l(Mr);Gi=n(Qm,'"all"'),Qm.forEach(t),Ji=n(g," or "),xr=o(g,"CODE",{});var Zm=l(xr);Yi=n(Zm,'"false"'),Zm.forEach(t),Xi=n(g,". Set to "),Fr=o(g,"CODE",{});var ed=l(Fr);Ki=n(ed,'"false"'),ed.forEach(t),Qi=n(g," to disable gradient logging or "),jr=o(g,"CODE",{});var td=l(jr);Zi=n(td,'"all"'),td.forEach(t),ec=n(g,` to log gradients and parameters. WANDB_PROJECT (`),Nr=o(g,"CODE",{});var ad=l(Nr);tc=n(ad,"str"),ad.forEach(t),ac=n(g,", "),Wr=o(g,"EM",{});var rd=l(Wr);rc=n(rd,"optional"),rd.forEach(t),nc=n(g,", defaults to "),zr=o(g,"CODE",{});var nd=l(zr);sc=n(nd,'"huggingface"'),nd.forEach(t),oc=n(g,`): Set this to a custom string to store results in a different project. WANDB_DISABLED (`),Br=o(g,"CODE",{});var sd=l(Br);lc=n(sd,"bool"),sd.forEach(t),ic=n(g,", "),Vr=o(g,"EM",{});var od=l(Vr);cc=n(od,"optional"),od.forEach(t),fc=n(g,", defaults to "),qr=o(g,"CODE",{});var ld=l(qr);hc=n(ld,"False"),ld.forEach(t),mc=n(g,`): Whether or not to disable wandb entirely. 
Set `),Rr=o(g,"EM",{});var id=l(Rr);dc=n(id,"WANDB_DISABLED=true"),id.forEach(t),pc=n(g," to disable."),g.forEach(t),Qe.forEach(t),rr.forEach(t),Hn=f(a),J=o(a,"DIV",{class:!0});var sr=l(J);u(Dt.$$.fragment,sr),gc=f(sr),fe=o(sr,"P",{});var or=l(fe);_c=n(or,"A "),qa=o(or,"A",{href:!0});var cd=l(qa);uc=n(cd,"TrainerCallback"),cd.forEach(t),vc=n(or," that sends the logs to "),Lt=o(or,"A",{href:!0,rel:!0});var fd=l(Lt);bc=n(fd,"MLflow"),fd.forEach(t),Ec=n(or,"."),or.forEach(t),kc=f(sr),z=o(sr,"DIV",{class:!0});var Ze=l(z);u(St.$$.fragment,Ze),$c=f(Ze),Hr=o(Ze,"P",{});var hd=l(Hr);Tc=n(hd,"Setup the optional MLflow integration."),hd.forEach(t),Cc=f(Ze),he=o(Ze,"P",{});var lr=l(he);wc=n(lr,`Environment: HF_MLFLOW_LOG_ARTIFACTS (`),Ur=o(lr,"CODE",{});var md=l(Ur);yc=n(md,"str"),md.forEach(t),Ac=n(lr,", "),Gr=o(lr,"EM",{});var dd=l(Gr);Dc=n(dd,"optional"),dd.forEach(t),Lc=n(lr,`): Whether to use MLflow .log_artifact() facility to log artifacts.`),lr.forEach(t),Sc=f(Ze),F=o(Ze,"P",{});var K=l(F);Pc=n(K,"This only makes sense if logging to a remote server, e.g. s3 or GCS. If set to "),Jr=o(K,"CODE",{});var pd=l(Jr);Oc=n(pd,"True"),pd.forEach(t),Ic=n(K," or "),Yr=o(K,"EM",{});var gd=l(Yr);Mc=n(gd,"1"),gd.forEach(t),xc=n(K,`, will copy whatever is in `),Ra=o(K,"A",{href:!0});var _d=l(Ra);Fc=n(_d,"TrainingArguments"),_d.forEach(t),jc=n(K,"\u2019s "),Xr=o(K,"CODE",{});var ud=l(Xr);Nc=n(ud,"output_dir"),ud.forEach(t),Wc=n(K,` to the local or remote artifact storage. 
Using it without a remote storage will just copy the files to your artifact location.`),K.forEach(t),Ze.forEach(t),sr.forEach(t),Un=f(a),me=o(a,"DIV",{class:!0});var Es=l(me);u(Pt.$$.fragment,Es),zc=f(Es),de=o(Es,"P",{});var ir=l(de);Bc=n(ir,"A "),Ha=o(ir,"A",{href:!0});var vd=l(Ha);Vc=n(vd,"TrainerCallback"),vd.forEach(t),qc=n(ir," that sends the logs to "),Ot=o(ir,"A",{href:!0,rel:!0});var bd=l(Ot);Rc=n(bd,"AzureML"),bd.forEach(t),Hc=n(ir,"."),ir.forEach(t),Es.forEach(t),Gn=f(a),pe=o(a,"DIV",{class:!0});var ks=l(pe);u(It.$$.fragment,ks),Uc=f(ks),Mt=o(ks,"P",{});var $s=l(Mt);Gc=n($s,"A "),Ua=o($s,"A",{href:!0});var Ed=l(Ua);Jc=n(Ed,"TrainerCallback"),Ed.forEach(t),Yc=n($s," that tracks the CO2 emission of training."),$s.forEach(t),ks.forEach(t),Jn=f(a),ge=o(a,"H2",{class:!0});var Ts=l(ge);Le=o(Ts,"A",{id:!0,class:!0,href:!0});var kd=l(Le);Kr=o(kd,"SPAN",{});var $d=l(Kr);u(xt.$$.fragment,$d),$d.forEach(t),kd.forEach(t),Xc=f(Ts),Qr=o(Ts,"SPAN",{});var Td=l(Qr);Kc=n(Td,"TrainerCallback"),Td.forEach(t),Ts.forEach(t),Yn=f(a),p=o(a,"DIV",{class:!0});var $=l(p);u(Ft.$$.fragment,$),Qc=f($),Zr=o($,"P",{});var Cd=l(Zr);Zc=n(Cd,`A class for objects that will inspect the state of the training loop at some events and take some decisions. 
At each of those events the following arguments are available:`),Cd.forEach(t),ef=f($),jt=o($,"P",{});var Cs=l(jt);tf=n(Cs,"The "),en=o(Cs,"CODE",{});var wd=l(en);af=n(wd,"control"),wd.forEach(t),rf=n(Cs,` object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version.`),Cs.forEach(t),nf=f($),O=o($,"P",{});var V=l(O);sf=n(V,"The argument "),tn=o(V,"CODE",{});var yd=l(tn);of=n(yd,"args"),yd.forEach(t),lf=n(V,", "),an=o(V,"CODE",{});var Ad=l(an);cf=n(Ad,"state"),Ad.forEach(t),ff=n(V," and "),rn=o(V,"CODE",{});var Dd=l(rn);hf=n(Dd,"control"),Dd.forEach(t),mf=n(V," are positionals for all events, all the others are grouped in "),nn=o(V,"CODE",{});var Ld=l(nn);df=n(Ld,"kwargs"),Ld.forEach(t),pf=n(V,`. You can unpack the ones you need in the signature of the event using them. As an example, see the code of the simple `),sn=o(V,"CODE",{});var Sd=l(sn);gf=n(Sd,"PrinterCallback"),Sd.forEach(t),_f=n(V,"."),V.forEach(t),uf=f($),on=o($,"P",{});var Pd=l(on);vf=n(Pd,"Example:"),Pd.forEach(t),bf=f($),u(Nt.$$.fragment,$),Ef=f($),Se=o($,"DIV",{class:!0});var ws=l(Se);u(Wt.$$.fragment,ws),kf=f(ws),ln=o(ws,"P",{});var Od=l(ln);$f=n(Od,"Event called at the beginning of an epoch."),Od.forEach(t),ws.forEach(t),Tf=f($),Pe=o($,"DIV",{class:!0});var ys=l(Pe);u(zt.$$.fragment,ys),Cf=f(ys),cn=o(ys,"P",{});var Id=l(cn);wf=n(Id,"Event called at the end of an epoch."),Id.forEach(t),ys.forEach(t),yf=f($),Oe=o($,"DIV",{class:!0});var As=l(Oe);u(Bt.$$.fragment,As),Af=f(As),fn=o(As,"P",{});var Md=l(fn);Df=n(Md,"Event called after an evaluation phase."),Md.forEach(t),As.forEach(t),Lf=f($),Ie=o($,"DIV",{class:!0});var Ds=l(Ie);u(Vt.$$.fragment,Ds),Sf=f(Ds),qt=o(Ds,"P",{});var Ls=l(qt);Pf=n(Ls,"Event called at the end of the initialization of the "),Ga=o(Ls,"A",{href:!0});var xd=l(Ga);Of=n(xd,"Trainer"),xd.forEach(t),If=n(Ls,"."),Ls.forEach(t),Ds.forEach(t),Mf=f($),Me=o($,"DIV",{class:!0});var 
Ss=l(Me);u(Rt.$$.fragment,Ss),xf=f(Ss),hn=o(Ss,"P",{});var Fd=l(hn);Ff=n(Fd,"Event called after logging the last logs."),Fd.forEach(t),Ss.forEach(t),jf=f($),xe=o($,"DIV",{class:!0});var Ps=l(xe);u(Ht.$$.fragment,Ps),Nf=f(Ps),mn=o(Ps,"P",{});var jd=l(mn);Wf=n(jd,"Event called after a prediction step."),jd.forEach(t),Ps.forEach(t),zf=f($),Fe=o($,"DIV",{class:!0});var Os=l(Fe);u(Ut.$$.fragment,Os),Bf=f(Os),dn=o(Os,"P",{});var Nd=l(dn);Vf=n(Nd,"Event called after a checkpoint save."),Nd.forEach(t),Os.forEach(t),qf=f($),je=o($,"DIV",{class:!0});var Is=l(je);u(Gt.$$.fragment,Is),Rf=f(Is),pn=o(Is,"P",{});var Wd=l(pn);Hf=n(Wd,`Event called at the beginning of a training step. If using gradient accumulation, one training step might take several inputs.`),Wd.forEach(t),Is.forEach(t),Uf=f($),Ne=o($,"DIV",{class:!0});var Ms=l(Ne);u(Jt.$$.fragment,Ms),Gf=f(Ms),gn=o(Ms,"P",{});var zd=l(gn);Jf=n(zd,`Event called at the end of a training step. If using gradient accumulation, one training step might take several inputs.`),zd.forEach(t),Ms.forEach(t),Yf=f($),We=o($,"DIV",{class:!0});var xs=l(We);u(Yt.$$.fragment,xs),Xf=f(xs),_n=o(xs,"P",{});var Bd=l(_n);Kf=n(Bd,"Event called at the end of an substep during gradient accumulation."),Bd.forEach(t),xs.forEach(t),Qf=f($),ze=o($,"DIV",{class:!0});var Fs=l(ze);u(Xt.$$.fragment,Fs),Zf=f(Fs),un=o(Fs,"P",{});var Vd=l(un);eh=n(Vd,"Event called at the beginning of training."),Vd.forEach(t),Fs.forEach(t),th=f($),Be=o($,"DIV",{class:!0});var js=l(Be);u(Kt.$$.fragment,js),ah=f(js),vn=o(js,"P",{});var qd=l(vn);rh=n(qd,"Event called at the end of training."),qd.forEach(t),js.forEach(t),$.forEach(t),Xn=f(a),Ve=o(a,"P",{});var Ns=l(Ve);nh=n(Ns,"Here is an example of how to register a custom callback with the PyTorch "),Ja=o(Ns,"A",{href:!0});var Rd=l(Ja);sh=n(Rd,"Trainer"),Rd.forEach(t),oh=n(Ns,":"),Ns.forEach(t),Kn=f(a),u(Qt.$$.fragment,a),Qn=f(a),qe=o(a,"P",{});var Ws=l(qe);lh=n(Ws,"Another way to register a callback is to call 
"),bn=o(Ws,"CODE",{});var Hd=l(bn);ih=n(Hd,"trainer.add_callback()"),Hd.forEach(t),ch=n(Ws," as follows:"),Ws.forEach(t),Zn=f(a),u(Zt.$$.fragment,a),es=f(a),_e=o(a,"H2",{class:!0});var zs=l(_e);Re=o(zs,"A",{id:!0,class:!0,href:!0});var Ud=l(Re);En=o(Ud,"SPAN",{});var Gd=l(En);u(ea.$$.fragment,Gd),Gd.forEach(t),Ud.forEach(t),fh=f(zs),kn=o(zs,"SPAN",{});var Jd=l(kn);hh=n(Jd,"TrainerState"),Jd.forEach(t),zs.forEach(t),ts=f(a),I=o(a,"DIV",{class:!0});var Q=l(I);u(ta.$$.fragment,Q),mh=f(Q),ue=o(Q,"P",{});var cr=l(ue);dh=n(cr,"A class containing the "),Ya=o(cr,"A",{href:!0});var Yd=l(Ya);ph=n(Yd,"Trainer"),Yd.forEach(t),gh=n(cr,` inner state that will be saved along the model and optimizer when checkpointing and passed to the `),Xa=o(cr,"A",{href:!0});var Xd=l(Xa);_h=n(Xd,"TrainerCallback"),Xd.forEach(t),uh=n(cr,"."),cr.forEach(t),vh=f(Q),u(He.$$.fragment,Q),bh=f(Q),Ue=o(Q,"DIV",{class:!0});var Bs=l(Ue);u(aa.$$.fragment,Bs),Eh=f(Bs),ra=o(Bs,"P",{});var Vs=l(ra);kh=n(Vs,"Create an instance from the content of "),$n=o(Vs,"CODE",{});var Kd=l($n);$h=n(Kd,"json_path"),Kd.forEach(t),Th=n(Vs,"."),Vs.forEach(t),Bs.forEach(t),Ch=f(Q),Ge=o(Q,"DIV",{class:!0});var qs=l(Ge);u(na.$$.fragment,qs),wh=f(qs),sa=o(qs,"P",{});var Rs=l(sa);yh=n(Rs,"Save the content of this instance in JSON format inside "),Tn=o(Rs,"CODE",{});var Qd=l(Tn);Ah=n(Qd,"json_path"),Qd.forEach(t),Dh=n(Rs,"."),Rs.forEach(t),qs.forEach(t),Q.forEach(t),as=f(a),ve=o(a,"H2",{class:!0});var Hs=l(ve);Je=o(Hs,"A",{id:!0,class:!0,href:!0});var Zd=l(Je);Cn=o(Zd,"SPAN",{});var ep=l(Cn);u(oa.$$.fragment,ep),ep.forEach(t),Zd.forEach(t),Lh=f(Hs),wn=o(Hs,"SPAN",{});var tp=l(wn);Sh=n(tp,"TrainerControl"),tp.forEach(t),Hs.forEach(t),rs=f(a),be=o(a,"DIV",{class:!0});var Us=l(be);u(la.$$.fragment,Us),Ph=f(Us),Ee=o(Us,"P",{});var fr=l(Ee);Oh=n(fr,"A class that handles the "),Ka=o(fr,"A",{href:!0});var ap=l(Ka);Ih=n(ap,"Trainer"),ap.forEach(t),Mh=n(fr," control flow. 
This class is used by the "),Qa=o(fr,"A",{href:!0});var rp=l(Qa);xh=n(rp,"TrainerCallback"),rp.forEach(t),Fh=n(fr,` to activate some switches in the training loop.`),fr.forEach(t),Us.forEach(t),this.h()},h(){i(w,"name","hf:doc:metadata"),i(w,"content",JSON.stringify(hp)),i(P,"id","callbacks"),i(P,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(P,"href","#callbacks"),i(D,"class","relative group"),i(ha,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),i(ma,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerControl"),i(da,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),i(pa,"href","trainer"),i(ga,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),i(ua,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.DefaultFlowCallback"),i(va,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.PrinterCallback"),i(ba,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.ProgressCallback"),i(Ea,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),i($a,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.integrations.TensorBoardCallback"),i(Ta,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.integrations.WandbCallback"),i(tt,"href","https://www.wandb.com/"),i(tt,"rel","nofollow"),i(Ca,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.integrations.CometCallback"),i(at,"href","https://www.comet.ml/site/"),i(at,"rel","nofollow"),i(wa,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.integrations.MLflowCallback"),i(rt,"href","https://www.mlflow.org/"),i(rt,"rel","nofollow"),i(ya,"href","/docs/transformers/pr_16143/en/main_classes/callback#tr
ansformers.integrations.AzureMLCallback"),i(nt,"href","https://pypi.org/project/azureml-sdk/"),i(nt,"rel","nofollow"),i(Aa,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.integrations.CodeCarbonCallback"),i(st,"href","https://pypi.org/project/codecarbon/"),i(st,"rel","nofollow"),i(Da,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(La,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),i(Sa,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),i(Pa,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerState"),i(Oa,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerControl"),i(Ae,"id","transformers.integrations.CometCallback"),i(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Ae,"href","#transformers.integrations.CometCallback"),i(ae,"class","relative 
group"),i(Ia,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(Ma,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(it,"href","https://www.comet.ml/site/"),i(it,"rel","nofollow"),i(ht,"href","https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables"),i(ht,"rel","nofollow"),i(N,"class","docstring"),i(R,"class","docstring"),i(xa,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(ne,"class","docstring"),i(Fa,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(se,"class","docstring"),i(ja,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(oe,"class","docstring"),i(Na,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(Wa,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),i(za,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerState"),i(H,"class","docstring"),i(Ba,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(kt,"href","https://www.tensorflow.org/tensorboard"),i(kt,"rel","nofollow"),i(le,"class","docstring"),i(Va,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(Tt,"href","https://www.wandb.com/"),i(Tt,"rel","nofollow"),i(At,"href","https://docs.wandb.ai/integrations/huggingface"),i(At,"rel","nofollow"),i(W,"class","docstring"),i(G,"class","docstring"),i(qa,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(Lt,"href","https://www.mlflow.org/"),i(Lt,"rel","nofollow"),i(Ra,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),i(z,"class","docstring"),i(J,"class","docstring"),i(Ha,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.Traine
rCallback"),i(Ot,"href","https://pypi.org/project/azureml-sdk/"),i(Ot,"rel","nofollow"),i(me,"class","docstring"),i(Ua,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(pe,"class","docstring"),i(Le,"id","transformers.TrainerCallback"),i(Le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Le,"href","#transformers.TrainerCallback"),i(ge,"class","relative group"),i(Se,"class","docstring"),i(Pe,"class","docstring"),i(Oe,"class","docstring"),i(Ga,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),i(Ie,"class","docstring"),i(Me,"class","docstring"),i(xe,"class","docstring"),i(Fe,"class","docstring"),i(je,"class","docstring"),i(Ne,"class","docstring"),i(We,"class","docstring"),i(ze,"class","docstring"),i(Be,"class","docstring"),i(p,"class","docstring"),i(Ja,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),i(Re,"id","transformers.TrainerState"),i(Re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Re,"href","#transformers.TrainerState"),i(_e,"class","relative group"),i(Ya,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),i(Xa,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(Ue,"class","docstring"),i(Ge,"class","docstring"),i(I,"class","docstring"),i(Je,"id","transformers.TrainerControl"),i(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Je,"href","#transformers.TrainerControl"),i(ve,"class","relative 
group"),i(Ka,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),i(Qa,"href","/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback"),i(be,"class","docstring")},m(a,h){e(document.head,w),m(a,Y,h),m(a,D,h),e(D,P),e(P,Z),v(M,Z,null),e(D,et),e(D,ee),e(ee,te),m(a,x,h),m(a,q,h),e(q,fa),e(q,ha),e(ha,Gs),e(q,Js),m(a,On,h),m(a,j,h),e(j,Ys),e(j,ma),e(ma,Xs),e(j,Ks),e(j,da),e(da,Qs),e(j,Zs),e(j,pa),e(pa,eo),e(j,to),m(a,In,h),m(a,ke,h),e(ke,ao),e(ke,ga),e(ga,ro),e(ke,no),m(a,Mn,h),m(a,y,h),e(y,_a),e(_a,ua),e(ua,so),e(_a,oo),e(y,lo),e(y,X),e(X,va),e(va,io),e(X,co),e(X,ba),e(ba,fo),e(X,ho),e(X,Ea),e(Ea,mo),e(X,po),e(y,go),e(y,ka),e(ka,$a),e($a,_o),e(ka,uo),e(y,vo),e(y,$e),e($e,Ta),e(Ta,bo),e($e,Eo),e($e,tt),e(tt,ko),e($e,$o),e(y,To),e(y,Te),e(Te,Ca),e(Ca,Co),e(Te,wo),e(Te,at),e(at,yo),e(Te,Ao),e(y,Do),e(y,Ce),e(Ce,wa),e(wa,Lo),e(Ce,So),e(Ce,rt),e(rt,Po),e(Ce,Oo),e(y,Io),e(y,we),e(we,ya),e(ya,Mo),e(we,xo),e(we,nt),e(nt,Fo),e(we,jo),e(y,No),e(y,ye),e(ye,Aa),e(Aa,Wo),e(ye,zo),e(ye,st),e(st,Bo),e(ye,Vo),m(a,xn,h),m(a,L,h),e(L,qo),e(L,Da),e(Da,Ro),e(L,Ho),e(L,La),e(La,Uo),e(L,Go),e(L,Sa),e(Sa,Jo),e(L,Yo),e(L,Pa),e(Pa,Xo),e(L,Ko),e(L,Oa),e(Oa,Qo),e(L,Zo),m(a,Fn,h),m(a,ae,h),e(ae,Ae),e(Ae,mr),v(ot,mr,null),e(ae,el),e(ae,dr),e(dr,tl),m(a,jn,h),m(a,De,h),e(De,al),e(De,Ia),e(Ia,rl),e(De,nl),m(a,Nn,h),m(a,R,h),v(lt,R,null),e(R,sl),e(R,re),e(re,ol),e(re,Ma),e(Ma,ll),e(re,il),e(re,it),e(it,cl),e(re,fl),e(R,hl),e(R,N),v(ct,N,null),e(N,ml),e(N,pr),e(pr,dl),e(N,pl),e(N,C),e(C,gl),e(C,gr),e(gr,_l),e(C,ul),e(C,_r),e(_r,vl),e(C,bl),e(C,ur),e(ur,El),e(C,kl),e(C,vr),e(vr,$l),e(C,Tl),e(C,br),e(br,Cl),e(C,wl),e(C,Er),e(Er,yl),e(C,Al),e(C,kr),e(kr,Dl),e(C,Ll),e(C,$r),e($r,Sl),e(C,Pl),e(C,Tr),e(Tr,Ol),e(C,Il),e(N,Ml),e(N,ft),e(ft,xl),e(ft,ht),e(ht,Fl),e(ft,jl),m(a,Wn,h),m(a,ne,h),v(mt,ne,null),e(ne,Nl),e(ne,dt),e(dt,Wl),e(dt,xa),e(xa,zl),e(dt,Bl),m(a,zn,h),m(a,se,h),v(pt,se,null),e(se,Vl),e(se,gt),e(gt,ql),e(gt,Fa),e(Fa,Rl),e(gt,Hl),m(a,Bn,h),m(
a,oe,h),v(_t,oe,null),e(oe,Ul),e(oe,ut),e(ut,Gl),e(ut,ja),e(ja,Jl),e(ut,Yl),m(a,Vn,h),m(a,H,h),v(vt,H,null),e(H,Xl),e(H,bt),e(bt,Kl),e(bt,Na),e(Na,Ql),e(bt,Zl),e(H,ei),e(H,U),e(U,ti),e(U,Wa),e(Wa,ai),e(U,ri),e(U,Cr),e(Cr,ni),e(U,si),e(U,za),e(za,oi),e(U,li),m(a,qn,h),m(a,le,h),v(Et,le,null),e(le,ii),e(le,ie),e(ie,ci),e(ie,Ba),e(Ba,fi),e(ie,hi),e(ie,kt),e(kt,mi),e(ie,di),m(a,Rn,h),m(a,G,h),v($t,G,null),e(G,pi),e(G,ce),e(ce,gi),e(ce,Va),e(Va,_i),e(ce,ui),e(ce,Tt),e(Tt,vi),e(ce,bi),e(G,Ei),e(G,W),v(Ct,W,null),e(W,ki),e(W,wt),e(wt,$i),e(wt,wr),e(wr,Ti),e(wt,Ci),e(W,wi),e(W,yt),e(yt,yi),e(yt,At),e(At,Ai),e(yt,Di),e(W,Li),e(W,d),e(d,Si),e(d,yr),e(yr,Pi),e(d,Oi),e(d,Ar),e(Ar,Ii),e(d,Mi),e(d,Dr),e(Dr,xi),e(d,Fi),e(d,Lr),e(Lr,ji),e(d,Ni),e(d,Sr),e(Sr,Wi),e(d,zi),e(d,Pr),e(Pr,Bi),e(d,Vi),e(d,Or),e(Or,qi),e(d,Ri),e(d,Ir),e(Ir,Hi),e(d,Ui),e(d,Mr),e(Mr,Gi),e(d,Ji),e(d,xr),e(xr,Yi),e(d,Xi),e(d,Fr),e(Fr,Ki),e(d,Qi),e(d,jr),e(jr,Zi),e(d,ec),e(d,Nr),e(Nr,tc),e(d,ac),e(d,Wr),e(Wr,rc),e(d,nc),e(d,zr),e(zr,sc),e(d,oc),e(d,Br),e(Br,lc),e(d,ic),e(d,Vr),e(Vr,cc),e(d,fc),e(d,qr),e(qr,hc),e(d,mc),e(d,Rr),e(Rr,dc),e(d,pc),m(a,Hn,h),m(a,J,h),v(Dt,J,null),e(J,gc),e(J,fe),e(fe,_c),e(fe,qa),e(qa,uc),e(fe,vc),e(fe,Lt),e(Lt,bc),e(fe,Ec),e(J,kc),e(J,z),v(St,z,null),e(z,$c),e(z,Hr),e(Hr,Tc),e(z,Cc),e(z,he),e(he,wc),e(he,Ur),e(Ur,yc),e(he,Ac),e(he,Gr),e(Gr,Dc),e(he,Lc),e(z,Sc),e(z,F),e(F,Pc),e(F,Jr),e(Jr,Oc),e(F,Ic),e(F,Yr),e(Yr,Mc),e(F,xc),e(F,Ra),e(Ra,Fc),e(F,jc),e(F,Xr),e(Xr,Nc),e(F,Wc),m(a,Un,h),m(a,me,h),v(Pt,me,null),e(me,zc),e(me,de),e(de,Bc),e(de,Ha),e(Ha,Vc),e(de,qc),e(de,Ot),e(Ot,Rc),e(de,Hc),m(a,Gn,h),m(a,pe,h),v(It,pe,null),e(pe,Uc),e(pe,Mt),e(Mt,Gc),e(Mt,Ua),e(Ua,Jc),e(Mt,Yc),m(a,Jn,h),m(a,ge,h),e(ge,Le),e(Le,Kr),v(xt,Kr,null),e(ge,Xc),e(ge,Qr),e(Qr,Kc),m(a,Yn,h),m(a,p,h),v(Ft,p,null),e(p,Qc),e(p,Zr),e(Zr,Zc),e(p,ef),e(p,jt),e(jt,tf),e(jt,en),e(en,af),e(jt,rf),e(p,nf),e(p,O),e(O,sf),e(O,tn),e(tn,of),e(O,lf),e(O,an),e(an,cf),e(O,ff),e(O,rn),e(rn,hf),e(O,mf),e(O,nn),e(nn,df),e(O,pf),e(O,sn
),e(sn,gf),e(O,_f),e(p,uf),e(p,on),e(on,vf),e(p,bf),v(Nt,p,null),e(p,Ef),e(p,Se),v(Wt,Se,null),e(Se,kf),e(Se,ln),e(ln,$f),e(p,Tf),e(p,Pe),v(zt,Pe,null),e(Pe,Cf),e(Pe,cn),e(cn,wf),e(p,yf),e(p,Oe),v(Bt,Oe,null),e(Oe,Af),e(Oe,fn),e(fn,Df),e(p,Lf),e(p,Ie),v(Vt,Ie,null),e(Ie,Sf),e(Ie,qt),e(qt,Pf),e(qt,Ga),e(Ga,Of),e(qt,If),e(p,Mf),e(p,Me),v(Rt,Me,null),e(Me,xf),e(Me,hn),e(hn,Ff),e(p,jf),e(p,xe),v(Ht,xe,null),e(xe,Nf),e(xe,mn),e(mn,Wf),e(p,zf),e(p,Fe),v(Ut,Fe,null),e(Fe,Bf),e(Fe,dn),e(dn,Vf),e(p,qf),e(p,je),v(Gt,je,null),e(je,Rf),e(je,pn),e(pn,Hf),e(p,Uf),e(p,Ne),v(Jt,Ne,null),e(Ne,Gf),e(Ne,gn),e(gn,Jf),e(p,Yf),e(p,We),v(Yt,We,null),e(We,Xf),e(We,_n),e(_n,Kf),e(p,Qf),e(p,ze),v(Xt,ze,null),e(ze,Zf),e(ze,un),e(un,eh),e(p,th),e(p,Be),v(Kt,Be,null),e(Be,ah),e(Be,vn),e(vn,rh),m(a,Xn,h),m(a,Ve,h),e(Ve,nh),e(Ve,Ja),e(Ja,sh),e(Ve,oh),m(a,Kn,h),v(Qt,a,h),m(a,Qn,h),m(a,qe,h),e(qe,lh),e(qe,bn),e(bn,ih),e(qe,ch),m(a,Zn,h),v(Zt,a,h),m(a,es,h),m(a,_e,h),e(_e,Re),e(Re,En),v(ea,En,null),e(_e,fh),e(_e,kn),e(kn,hh),m(a,ts,h),m(a,I,h),v(ta,I,null),e(I,mh),e(I,ue),e(ue,dh),e(ue,Ya),e(Ya,ph),e(ue,gh),e(ue,Xa),e(Xa,_h),e(ue,uh),e(I,vh),v(He,I,null),e(I,bh),e(I,Ue),v(aa,Ue,null),e(Ue,Eh),e(Ue,ra),e(ra,kh),e(ra,$n),e($n,$h),e(ra,Th),e(I,Ch),e(I,Ge),v(na,Ge,null),e(Ge,wh),e(Ge,sa),e(sa,yh),e(sa,Tn),e(Tn,Ah),e(sa,Dh),m(a,as,h),m(a,ve,h),e(ve,Je),e(Je,Cn),v(oa,Cn,null),e(ve,Lh),e(ve,wn),e(wn,Sh),m(a,rs,h),m(a,be,h),v(la,be,null),e(be,Ph),e(be,Ee),e(Ee,Oh),e(Ee,Ka),e(Ka,Ih),e(Ee,Mh),e(Ee,Qa),e(Qa,xh),e(Ee,Fh),ns=!0},p(a,[h]){const 
ia={};h&2&&(ia.$$scope={dirty:h,ctx:a}),He.$set(ia)},i(a){ns||(b(M.$$.fragment,a),b(ot.$$.fragment,a),b(lt.$$.fragment,a),b(ct.$$.fragment,a),b(mt.$$.fragment,a),b(pt.$$.fragment,a),b(_t.$$.fragment,a),b(vt.$$.fragment,a),b(Et.$$.fragment,a),b($t.$$.fragment,a),b(Ct.$$.fragment,a),b(Dt.$$.fragment,a),b(St.$$.fragment,a),b(Pt.$$.fragment,a),b(It.$$.fragment,a),b(xt.$$.fragment,a),b(Ft.$$.fragment,a),b(Nt.$$.fragment,a),b(Wt.$$.fragment,a),b(zt.$$.fragment,a),b(Bt.$$.fragment,a),b(Vt.$$.fragment,a),b(Rt.$$.fragment,a),b(Ht.$$.fragment,a),b(Ut.$$.fragment,a),b(Gt.$$.fragment,a),b(Jt.$$.fragment,a),b(Yt.$$.fragment,a),b(Xt.$$.fragment,a),b(Kt.$$.fragment,a),b(Qt.$$.fragment,a),b(Zt.$$.fragment,a),b(ea.$$.fragment,a),b(ta.$$.fragment,a),b(He.$$.fragment,a),b(aa.$$.fragment,a),b(na.$$.fragment,a),b(oa.$$.fragment,a),b(la.$$.fragment,a),ns=!0)},o(a){E(M.$$.fragment,a),E(ot.$$.fragment,a),E(lt.$$.fragment,a),E(ct.$$.fragment,a),E(mt.$$.fragment,a),E(pt.$$.fragment,a),E(_t.$$.fragment,a),E(vt.$$.fragment,a),E(Et.$$.fragment,a),E($t.$$.fragment,a),E(Ct.$$.fragment,a),E(Dt.$$.fragment,a),E(St.$$.fragment,a),E(Pt.$$.fragment,a),E(It.$$.fragment,a),E(xt.$$.fragment,a),E(Ft.$$.fragment,a),E(Nt.$$.fragment,a),E(Wt.$$.fragment,a),E(zt.$$.fragment,a),E(Bt.$$.fragment,a),E(Vt.$$.fragment,a),E(Rt.$$.fragment,a),E(Ht.$$.fragment,a),E(Ut.$$.fragment,a),E(Gt.$$.fragment,a),E(Jt.$$.fragment,a),E(Yt.$$.fragment,a),E(Xt.$$.fragment,a),E(Kt.$$.fragment,a),E(Qt.$$.fragment,a),E(Zt.$$.fragment,a),E(ea.$$.fragment,a),E(ta.$$.fragment,a),E(He.$$.fragment,a),E(aa.$$.fragment,a),E(na.$$.fragment,a),E(oa.$$.fragment,a),E(la.$$.fragment,a),ns=!1},d(a){t(w),a&&t(Y),a&&t(D),k(M),a&&t(x),a&&t(q),a&&t(On),a&&t(j),a&&t(In),a&&t(ke),a&&t(Mn),a&&t(y),a&&t(xn),a&&t(L),a&&t(Fn),a&&t(ae),k(ot),a&&t(jn),a&&t(De),a&&t(Nn),a&&t(R),k(lt),k(ct),a&&t(Wn),a&&t(ne),k(mt),a&&t(zn),a&&t(se),k(pt),a&&t(Bn),a&&t(oe),k(_t),a&&t(Vn),a&&t(H),k(vt),a&&t(qn),a&&t(le),k(Et),a&&t(Rn),a&&t(G),k($t),k(Ct),a&&t(Hn),a&&t(J),k(Dt),k
(St),a&&t(Un),a&&t(me),k(Pt),a&&t(Gn),a&&t(pe),k(It),a&&t(Jn),a&&t(ge),k(xt),a&&t(Yn),a&&t(p),k(Ft),k(Nt),k(Wt),k(zt),k(Bt),k(Vt),k(Rt),k(Ht),k(Ut),k(Gt),k(Jt),k(Yt),k(Xt),k(Kt),a&&t(Xn),a&&t(Ve),a&&t(Kn),k(Qt,a),a&&t(Qn),a&&t(qe),a&&t(Zn),k(Zt,a),a&&t(es),a&&t(_e),k(ea),a&&t(ts),a&&t(I),k(ta),k(He),k(aa),k(na),a&&t(as),a&&t(ve),k(oa),a&&t(rs),a&&t(be),k(la)}}}const hp={local:"callbacks",sections:[{local:"transformers.integrations.CometCallback",title:"Available Callbacks"},{local:"transformers.TrainerCallback",title:"TrainerCallback"},{local:"transformers.TrainerState",title:"TrainerState"},{local:"transformers.TrainerControl",title:"TrainerControl"}],title:"Callbacks"};function mp(hr,w,Y){let{fw:D}=w;return hr.$$set=P=>{"fw"in P&&Y(0,D=P.fw)},[D]}class bp extends np{constructor(w){super();sp(this,w,mp,fp,op,{fw:0})}}export{bp as default,hp as metadata};
403
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/main_classes/optimizer_schedules.mdx-d0982fc0.js
import{S as Ji,i as Ki,s as Qi,e as a,k as l,w as u,t as s,M as Xi,c as n,d as r,m as c,a as o,x as f,h as i,b as m,N as ka,F as t,g as d,y as g,L as Yi,q as _,o as w,B as v}from"../../chunks/vendor-4833417e.js";import{D as $}from"../../chunks/Docstring-4f315ed9.js";import{C as Na}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as Z}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function Zi(Fa){let S,dt,E,x,bt,_e,Ca,$t,Oa,Lr,ee,Ra,At,ja,qa,Pr,I,zt,Ua,Ga,we,Va,Et,Ma,Ha,Ba,xt,Ja,Wr,k,te,Tt,ve,Ka,Dt,Qa,Sr,T,ye,Xa,be,Ya,$e,Za,en,tn,re,Ae,rn,Lt,an,Ir,N,ae,Pt,ze,nn,Wt,on,kr,h,Ee,sn,ht,ln,xe,cn,mn,b,pn,St,dn,hn,Te,un,fn,It,gn,_n,kt,wn,vn,Nt,yn,bn,Ft,$n,An,Ct,zn,En,xn,Ot,Tn,Dn,De,Ln,Le,Pn,Wn,Sn,D,Pe,Rt,In,kn,We,jt,Nn,Fn,Se,Cn,Ie,On,Rn,jn,qt,Ut,qn,Un,Gt,Vt,Gn,Vn,Mt,Ht,Mn,Hn,Bt,Bn,Jn,ke,Kn,Jt,Qn,Xn,Ne,Yn,L,Zn,Kt,eo,to,ut,ro,ao,Qt,no,oo,so,Fe,io,Xt,lo,co,Ce,mo,ne,Oe,po,Yt,ho,Nr,F,oe,Zt,Re,uo,er,fo,Fr,z,je,go,C,_o,tr,wo,vo,qe,yo,bo,$o,rr,Ao,zo,se,Ue,Eo,ar,xo,Cr,O,Ge,To,nr,Do,Or,R,ie,or,Ve,Lo,sr,Po,Rr,j,le,ir,Me,Wo,lr,So,jr,q,He,Io,cr,ko,qr,U,Be,No,mr,Fo,Ur,G,Je,Co,pr,Oo,Gr,V,Ke,Ro,dr,jo,Vr,Qe,ys,Mr,M,Xe,qo,hr,Uo,Hr,Ye,bs,Br,H,Ze,Go,ur,Vo,Jr,et,$s,Kr,B,tt,Mo,fr,Ho,Qr,rt,As,Xr,P,at,Bo,nt,Jo,gr,Ko,Qo,Xo,ce,Yo,_r,Zo,es,ot,ts,Yr,J,me,wr,st,rs,vr,as,Zr,K,it,ns,yr,os,ea,Q,pe,br,lt,ss,$r,is,ta,X,de,Ar,ct,ls,zr,cs,ra,W,mt,ms,Y,ps,Er,ds,hs,xr,us,fs,gs,he,pt,_s,Tr,ws,aa;return _e=new Z({}),ve=new Z({}),ye=new $({props:{name:"class transformers.AdamW",anchor:"transformers.AdamW",parameters:[{name:"params",val:": typing.Iterable[torch.nn.parameter.Parameter]"},{name:"lr",val:": float = 0.001"},{name:"betas",val:": typing.Tuple[float, float] = (0.9, 0.999)"},{name:"eps",val:": float = 1e-06"},{name:"weight_decay",val:": float = 0.0"},{name:"correct_bias",val:": bool = True"},{name:"no_deprecation_warning",val:": bool = 
False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L273",parametersDescription:[{anchor:"transformers.AdamW.params",description:`<strong>params</strong> (<code>Iterable[nn.parameter.Parameter]</code>) &#x2014; Iterable of parameters to optimize or dictionaries defining parameter groups.`,name:"params"},{anchor:"transformers.AdamW.lr",description:`<strong>lr</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-3) &#x2014; The learning rate to use.`,name:"lr"},{anchor:"transformers.AdamW.betas",description:`<strong>betas</strong> (<code>Tuple[float,float]</code>, <em>optional</em>, defaults to (0.9, 0.999)) &#x2014; Adam&#x2019;s betas parameters (b1, b2).`,name:"betas"},{anchor:"transformers.AdamW.eps",description:`<strong>eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; Adam&#x2019;s epsilon for numerical stability.`,name:"eps"},{anchor:"transformers.AdamW.weight_decay",description:`<strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; Decoupled weight decay to apply.`,name:"weight_decay"},{anchor:"transformers.AdamW.correct_bias",description:`<strong>correct_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to correct bias in Adam (for instance, in Bert TF repository they use <code>False</code>).`,name:"correct_bias"},{anchor:"transformers.AdamW.no_deprecation_warning",description:`<strong>no_deprecation_warning</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; A flag used to disable the deprecation warning (set to <code>True</code> to disable the warning).`,name:"no_deprecation_warning"}]}}),Ae=new $({props:{name:"step",anchor:"transformers.AdamW.step",parameters:[{name:"closure",val:": typing.Callable = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L323",parametersDescription:[{anchor:"transformers.AdamW.step.closure",description:"<strong>closure</strong> (<code>Callable</code>, <em>optional</em>) &#x2014; A closure that reevaluates the model and returns the loss.",name:"closure"}]}}),ze=new Z({}),Ee=new $({props:{name:"class transformers.Adafactor",anchor:"transformers.Adafactor",parameters:[{name:"params",val:""},{name:"lr",val:" = None"},{name:"eps",val:" = (1e-30, 0.001)"},{name:"clip_threshold",val:" = 1.0"},{name:"decay_rate",val:" = -0.8"},{name:"beta1",val:" = None"},{name:"weight_decay",val:" = 0.0"},{name:"scale_parameter",val:" = True"},{name:"relative_step",val:" = True"},{name:"warmup_init",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L385",parametersDescription:[{anchor:"transformers.Adafactor.params",description:`<strong>params</strong> (<code>Iterable[nn.parameter.Parameter]</code>) &#x2014; Iterable of parameters to optimize or dictionaries defining parameter groups.`,name:"params"},{anchor:"transformers.Adafactor.lr",description:`<strong>lr</strong> (<code>float</code>, <em>optional</em>) &#x2014; The external learning rate.`,name:"lr"},{anchor:"transformers.Adafactor.eps",description:`<strong>eps</strong> (<code>Tuple[float, float]</code>, <em>optional</em>, defaults to (1e-30, 1e-3)) &#x2014; Regularization constants for square gradient and parameter scale respectively`,name:"eps"},{anchor:"transformers.Adafactor.clip_threshold",description:`<strong>clip_threshold</strong> (<code>float</code>, <em>optional</em>, defaults 1.0) &#x2014; Threshold of root mean square of final gradient update`,name:"clip_threshold"},{anchor:"transformers.Adafactor.decay_rate",description:`<strong>decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to -0.8) &#x2014; Coefficient used to compute running averages of 
square`,name:"decay_rate"},{anchor:"transformers.Adafactor.beta1",description:`<strong>beta1</strong> (<code>float</code>, <em>optional</em>) &#x2014; Coefficient used for computing running averages of gradient`,name:"beta1"},{anchor:"transformers.Adafactor.weight_decay",description:`<strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; Weight decay (L2 penalty)`,name:"weight_decay"},{anchor:"transformers.Adafactor.scale_parameter",description:`<strong>scale_parameter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If True, learning rate is scaled by root mean square`,name:"scale_parameter"},{anchor:"transformers.Adafactor.relative_step",description:`<strong>relative_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If True, time-dependent learning rate is computed instead of external learning rate`,name:"relative_step"},{anchor:"transformers.Adafactor.warmup_init",description:`<strong>warmup_init</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Time-dependent learning rate computation depends on whether warm-up initialization is being used`,name:"warmup_init"}]}}),ke=new Na({props:{code:"Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3)",highlighted:'Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">False</span>, relative_step=<span class="hljs-literal">False</span>, warmup_init=<span class="hljs-literal">False</span>, lr=<span class="hljs-number">1e-3</span>)'}}),Ne=new Na({props:{code:"Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)",highlighted:'Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">True</span>, relative_step=<span class="hljs-literal">True</span>, warmup_init=<span class="hljs-literal">True</span>, lr=<span 
class="hljs-literal">None</span>)'}}),Fe=new Na({props:{code:`from transformers.optimization import Adafactor, AdafactorSchedule optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))`,highlighted:`<span class="hljs-keyword">from</span> transformers.optimization <span class="hljs-keyword">import</span> Adafactor, AdafactorSchedule optimizer = Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">True</span>, relative_step=<span class="hljs-literal">True</span>, warmup_init=<span class="hljs-literal">True</span>, lr=<span class="hljs-literal">None</span>) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))`}}),Ce=new Na({props:{code:`# replace AdamW with Adafactor optimizer = Adafactor( model.parameters(), lr=1e-3, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )`,highlighted:`<span class="hljs-comment"># replace AdamW with Adafactor</span> optimizer = Adafactor( model.parameters(), lr=<span class="hljs-number">1e-3</span>, eps=(<span class="hljs-number">1e-30</span>, <span class="hljs-number">1e-3</span>), clip_threshold=<span class="hljs-number">1.0</span>, decay_rate=-<span class="hljs-number">0.8</span>, beta1=<span class="hljs-literal">None</span>, weight_decay=<span class="hljs-number">0.0</span>, relative_step=<span class="hljs-literal">False</span>, scale_parameter=<span class="hljs-literal">False</span>, warmup_init=<span class="hljs-literal">False</span>, )`}}),Oe=new $({props:{name:"step",anchor:"transformers.Adafactor.step",parameters:[{name:"closure",val:" = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L531",parametersDescription:[{anchor:"transformers.Adafactor.step.closure",description:`<strong>closure</strong> (callable, optional) &#x2014; A closure that reevaluates the model and returns the loss.`,name:"closure"}]}}),Re=new Z({}),je=new $({props:{name:"class transformers.AdamWeightDecay",anchor:"transformers.AdamWeightDecay",parameters:[{name:"learning_rate",val:": typing.Union[float, keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule] = 0.001"},{name:"beta_1",val:": float = 0.9"},{name:"beta_2",val:": float = 0.999"},{name:"epsilon",val:": float = 1e-07"},{name:"amsgrad",val:": bool = False"},{name:"weight_decay_rate",val:": float = 0.0"},{name:"include_in_weight_decay",val:": typing.Optional[typing.List[str]] = None"},{name:"exclude_from_weight_decay",val:": typing.Optional[typing.List[str]] = None"},{name:"name",val:": str = 'AdamWeightDecay'"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L152",parametersDescription:[{anchor:"transformers.AdamWeightDecay.learning_rate",description:`<strong>learning_rate</strong> (<code>Union[float, tf.keras.optimizers.schedules.LearningRateSchedule]</code>, <em>optional</em>, defaults to 1e-3) &#x2014; The learning rate to use or a schedule.`,name:"learning_rate"},{anchor:"transformers.AdamWeightDecay.beta_1",description:`<strong>beta_1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 parameter in Adam, which is the exponential decay rate for the 1st momentum estimates.`,name:"beta_1"},{anchor:"transformers.AdamWeightDecay.beta_2",description:`<strong>beta_2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 parameter in Adam, which is the exponential decay rate for the 2nd momentum 
estimates.`,name:"beta_2"},{anchor:"transformers.AdamWeightDecay.epsilon",description:`<strong>epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The epsilon parameter in Adam, which is a small constant for numerical stability.`,name:"epsilon"},{anchor:"transformers.AdamWeightDecay.amsgrad",description:`<strong>amsgrad</strong> (<code>bool</code>, <em>optional</em>, default to <code>False</code>) &#x2014; Whether to apply AMSGrad variant of this algorithm or not, see <a href="https://arxiv.org/abs/1904.09237" rel="nofollow">On the Convergence of Adam and Beyond</a>.`,name:"amsgrad"},{anchor:"transformers.AdamWeightDecay.weight_decay_rate",description:`<strong>weight_decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply.`,name:"weight_decay_rate"},{anchor:"transformers.AdamWeightDecay.include_in_weight_decay",description:`<strong>include_in_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is applied to all parameters by default (unless they are in <code>exclude_from_weight_decay</code>).`,name:"include_in_weight_decay"},{anchor:"transformers.AdamWeightDecay.exclude_from_weight_decay",description:`<strong>exclude_from_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to exclude from applying weight decay to. If a <code>include_in_weight_decay</code> is passed, the names in it will supersede this list.`,name:"exclude_from_weight_decay"},{anchor:"transformers.AdamWeightDecay.name",description:`<strong>name</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;AdamWeightDecay&#x2019;) &#x2014; Optional name for the operations created when applying gradients. kwargs &#x2014; Keyword arguments. 
Allowed to be {<code>clipnorm</code>, <code>clipvalue</code>, <code>lr</code>, <code>decay</code>}. <code>clipnorm</code> is clip gradients by norm; <code>clipvalue</code> is clip gradients by value, <code>decay</code> is included for backward compatibility to allow time inverse decay of learning rate. <code>lr</code> is included for backward compatibility, recommended to use <code>learning_rate</code> instead.`,name:"name"}]}}),Ue=new $({props:{name:"from_config",anchor:"transformers.AdamWeightDecay.from_config",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L209"}}),Ge=new $({props:{name:"transformers.create_optimizer",anchor:"transformers.create_optimizer",parameters:[{name:"init_lr",val:": float"},{name:"num_train_steps",val:": int"},{name:"num_warmup_steps",val:": int"},{name:"min_lr_ratio",val:": float = 0.0"},{name:"adam_beta1",val:": float = 0.9"},{name:"adam_beta2",val:": float = 0.999"},{name:"adam_epsilon",val:": float = 1e-08"},{name:"weight_decay_rate",val:": float = 0.0"},{name:"power",val:": float = 1.0"},{name:"include_in_weight_decay",val:": typing.Optional[typing.List[str]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L82",parametersDescription:[{anchor:"transformers.create_optimizer.init_lr",description:`<strong>init_lr</strong> (<code>float</code>) &#x2014; The desired learning rate at the end of the warmup phase.`,name:"init_lr"},{anchor:"transformers.create_optimizer.num_train_steps",description:`<strong>num_train_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_train_steps"},{anchor:"transformers.create_optimizer.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of warmup 
steps.`,name:"num_warmup_steps"},{anchor:"transformers.create_optimizer.min_lr_ratio",description:`<strong>min_lr_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The final learning rate at the end of the linear decay will be <code>init_lr * min_lr_ratio</code>.`,name:"min_lr_ratio"},{anchor:"transformers.create_optimizer.adam_beta1",description:`<strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 to use in Adam.`,name:"adam_beta1"},{anchor:"transformers.create_optimizer.adam_beta2",description:`<strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 to use in Adam.`,name:"adam_beta2"},{anchor:"transformers.create_optimizer.adam_epsilon",description:`<strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon to use in Adam.`,name:"adam_epsilon"},{anchor:"transformers.create_optimizer.weight_decay_rate",description:`<strong>weight_decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to use.`,name:"weight_decay_rate"},{anchor:"transformers.create_optimizer.power",description:`<strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The power to use for PolynomialDecay.`,name:"power"},{anchor:"transformers.create_optimizer.include_in_weight_decay",description:`<strong>include_in_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to apply weight decay to. 
If none is passed, weight decay is applied to all parameters except bias and layer norm parameters.`,name:"include_in_weight_decay"}]}}),Ve=new Z({}),Me=new Z({}),He=new $({props:{name:"class transformers.SchedulerType",anchor:"transformers.SchedulerType",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_utils.py#L301"}}),Be=new $({props:{name:"transformers.get_scheduler",anchor:"transformers.get_scheduler",parameters:[{name:"name",val:": typing.Union[str, transformers.trainer_utils.SchedulerType]"},{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": typing.Optional[int] = None"},{name:"num_training_steps",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L233",parametersDescription:[{anchor:"transformers.get_scheduler.name",description:`<strong>name</strong> (<code>str</code> or <code>SchedulerType</code>) &#x2014; The name of the scheduler to use.`,name:"name"},{anchor:"transformers.get_scheduler.optimizer",description:`<strong>optimizer</strong> (<code>torch.optim.Optimizer</code>) &#x2014; The optimizer that will be used during training.`,name:"optimizer"},{anchor:"transformers.get_scheduler.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of warmup steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it&#x2019;s unset and the scheduler type requires it.`,name:"num_warmup_steps"},{anchor:"transformers.get_scheduler.num_training_steps",description:`<strong>num_training_steps</strong> (\`int&#x201C;, <em>optional</em>) &#x2014; The number of training steps to do. 
This is not required by all schedulers (hence the argument being optional), the function will raise an error if it&#x2019;s unset and the scheduler type requires it.`,name:"num_training_steps"}]}}),Je=new $({props:{name:"transformers.get_constant_schedule",anchor:"transformers.get_constant_schedule",parameters:[{name:"optimizer",val:": Optimizer"},{name:"last_epoch",val:": int = -1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L34",parametersDescription:[{anchor:"transformers.get_constant_schedule.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_constant_schedule.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),Ke=new $({props:{name:"transformers.get_constant_schedule_with_warmup",anchor:"transformers.get_constant_schedule_with_warmup",parameters:[{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": int"},{name:"last_epoch",val:": int = -1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L50",parametersDescription:[{anchor:"transformers.get_constant_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_constant_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_constant_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, 
<em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),Xe=new $({props:{name:"transformers.get_cosine_schedule_with_warmup",anchor:"transformers.get_cosine_schedule_with_warmup",parameters:[{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": int"},{name:"num_training_steps",val:": int"},{name:"num_cycles",val:": float = 0.5"},{name:"last_epoch",val:": int = -1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L104",parametersDescription:[{anchor:"transformers.get_cosine_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_cosine_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_cosine_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_cosine_schedule_with_warmup.num_cycles",description:`<strong>num_cycles</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine).`,name:"num_cycles"},{anchor:"transformers.get_cosine_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> 
`}}),Ze=new $({props:{name:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup",anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup",parameters:[{name:"optimizer",val:": Optimizer"},{name:"num_warmup_steps",val:": int"},{name:"num_training_steps",val:": int"},{name:"num_cycles",val:": int = 1"},{name:"last_epoch",val:": int = -1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L138",parametersDescription:[{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_cycles",description:`<strong>num_cycles</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of hard restarts to use.`,name:"num_cycles"},{anchor:"transformers.get_cosine_with_hard_restarts_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),tt=new 
$({props:{name:"transformers.get_linear_schedule_with_warmup",anchor:"transformers.get_linear_schedule_with_warmup",parameters:[{name:"optimizer",val:""},{name:"num_warmup_steps",val:""},{name:"num_training_steps",val:""},{name:"last_epoch",val:" = -1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L75",parametersDescription:[{anchor:"transformers.get_linear_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_linear_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_linear_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_linear_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),at=new $({props:{name:"transformers.get_polynomial_decay_schedule_with_warmup",anchor:"transformers.get_polynomial_decay_schedule_with_warmup",parameters:[{name:"optimizer",val:""},{name:"num_warmup_steps",val:""},{name:"num_training_steps",val:""},{name:"lr_end",val:" = 1e-07"},{name:"power",val:" = 1.0"},{name:"last_epoch",val:" = -1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L173",parametersDescription:[{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.optimizer",description:`<strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The 
optimizer for which to schedule the learning rate.`,name:"optimizer"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.num_warmup_steps",description:`<strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.`,name:"num_warmup_steps"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.num_training_steps",description:`<strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.`,name:"num_training_steps"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.lr_end",description:`<strong>lr_end</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The end LR.`,name:"lr_end"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.power",description:`<strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Power factor.`,name:"power"},{anchor:"transformers.get_polynomial_decay_schedule_with_warmup.last_epoch",description:`<strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.`,name:"last_epoch"}],returnDescription:` <p><code>torch.optim.lr_scheduler.LambdaLR</code> with the appropriate schedule.</p> `}}),st=new Z({}),it=new $({props:{name:"class transformers.WarmUp",anchor:"transformers.WarmUp",parameters:[{name:"initial_learning_rate",val:": float"},{name:"decay_schedule_fn",val:": typing.Callable"},{name:"warmup_steps",val:": int"},{name:"power",val:": float = 1.0"},{name:"name",val:": str = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L24",parametersDescription:[{anchor:"transformers.WarmUp.initial_learning_rate",description:`<strong>initial_learning_rate</strong> (<code>float</code>) &#x2014; The initial learning rate for the schedule after the warmup (so this will be the learning rate at the end of the 
warmup).`,name:"initial_learning_rate"},{anchor:"transformers.WarmUp.decay_schedule_fn",description:`<strong>decay_schedule_fn</strong> (<code>Callable</code>) &#x2014; The schedule function to apply after the warmup for the rest of training.`,name:"decay_schedule_fn"},{anchor:"transformers.WarmUp.warmup_steps",description:`<strong>warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup part of training.`,name:"warmup_steps"},{anchor:"transformers.WarmUp.power",description:`<strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; The power to use for the polynomial warmup (defaults is a linear warmup).`,name:"power"},{anchor:"transformers.WarmUp.name",description:`<strong>name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Optional name prefix for the returned tensors during the schedule.`,name:"name"}]}}),lt=new Z({}),ct=new Z({}),mt=new $({props:{name:"class transformers.GradientAccumulator",anchor:"transformers.GradientAccumulator",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L282"}}),pt=new $({props:{name:"reset",anchor:"transformers.GradientAccumulator.reset",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L344"}}),{c(){S=a("meta"),dt=l(),E=a("h1"),x=a("a"),bt=a("span"),u(_e.$$.fragment),Ca=l(),$t=a("span"),Oa=s("Optimization"),Lr=l(),ee=a("p"),Ra=s("The "),At=a("code"),ja=s(".optimization"),qa=s(" module provides:"),Pr=l(),I=a("ul"),zt=a("li"),Ua=s("an optimizer with weight decay fixed that can be used to fine-tuned models, and"),Ga=l(),we=a("li"),Va=s("several schedules in the form of schedule objects that inherit from "),Et=a("code"),Ma=s("_LRSchedule"),Ha=s(":"),Ba=l(),xt=a("li"),Ja=s("a gradient accumulation class to accumulate the gradients of multiple batches"),Wr=l(),k=a("h2"),te=a("a"),Tt=a("span"),u(ve.$$.fragment),Ka=l(),Dt=a("span"),Qa=s("AdamW 
(PyTorch)"),Sr=l(),T=a("div"),u(ye.$$.fragment),Xa=l(),be=a("p"),Ya=s("Implements Adam algorithm with weight decay fix as introduced in "),$e=a("a"),Za=s(`Decoupled Weight Decay Regularization`),en=s("."),tn=l(),re=a("div"),u(Ae.$$.fragment),rn=l(),Lt=a("p"),an=s("Performs a single optimization step."),Ir=l(),N=a("h2"),ae=a("a"),Pt=a("span"),u(ze.$$.fragment),nn=l(),Wt=a("span"),on=s("AdaFactor (PyTorch)"),kr=l(),h=a("div"),u(Ee.$$.fragment),sn=l(),ht=a("p"),ln=s(`AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: `),xe=a("a"),cn=s("https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py"),mn=l(),b=a("p"),pn=s("Paper: "),St=a("em"),dn=s("Adafactor: Adaptive Learning Rates with Sublinear Memory Cost"),hn=l(),Te=a("a"),un=s("https://arxiv.org/abs/1804.04235"),fn=s(` Note that this optimizer internally adjusts the learning rate depending on the `),It=a("code"),gn=s("scale_parameter"),_n=s(", "),kt=a("code"),wn=s("relative_step"),vn=s(` and `),Nt=a("code"),yn=s("warmup_init"),bn=s(" options. 
To use a manual (external) learning rate schedule you should set "),Ft=a("code"),$n=s("scale_parameter=False"),An=s(` and `),Ct=a("code"),zn=s("relative_step=False"),En=s("."),xn=l(),Ot=a("p"),Tn=s("This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested."),Dn=l(),De=a("p"),Ln=s("Recommended T5 finetuning settings ("),Le=a("a"),Pn=s("https://discuss.huggingface.co/t/t5-finetuning-tips/684/3"),Wn=s("):"),Sn=l(),D=a("ul"),Pe=a("li"),Rt=a("p"),In=s("Training without LR warmup or clip_threshold is not recommended."),kn=l(),We=a("ul"),jt=a("li"),Nn=s("use scheduled LR warm-up to fixed LR"),Fn=l(),Se=a("li"),Cn=s("use clip_threshold=1.0 ("),Ie=a("a"),On=s("https://arxiv.org/abs/1804.04235"),Rn=s(")"),jn=l(),qt=a("li"),Ut=a("p"),qn=s("Disable relative updates"),Un=l(),Gt=a("li"),Vt=a("p"),Gn=s("Use scale_parameter=False"),Vn=l(),Mt=a("li"),Ht=a("p"),Mn=s("Additional optimizer operations like gradient clipping should not be used alongside Adafactor"),Hn=l(),Bt=a("p"),Bn=s("Example:"),Jn=l(),u(ke.$$.fragment),Kn=l(),Jt=a("p"),Qn=s("Others reported the following combination to work well:"),Xn=l(),u(Ne.$$.fragment),Yn=l(),L=a("p"),Zn=s("When using "),Kt=a("code"),eo=s("lr=None"),to=s(" with "),ut=a("a"),ro=s("Trainer"),ao=s(" you will most likely need to use "),Qt=a("code"),no=s("AdafactorSchedule"),oo=s("scheduler as following:"),so=l(),u(Fe.$$.fragment),io=l(),Xt=a("p"),lo=s("Usage:"),co=l(),u(Ce.$$.fragment),mo=l(),ne=a("div"),u(Oe.$$.fragment),po=l(),Yt=a("p"),ho=s("Performs a single optimization step"),Nr=l(),F=a("h2"),oe=a("a"),Zt=a("span"),u(Re.$$.fragment),uo=l(),er=a("span"),fo=s("AdamWeightDecay (TensorFlow)"),Fr=l(),z=a("div"),u(je.$$.fragment),go=l(),C=a("p"),_o=s(`Adam enables L2 weight decay and clip_by_global_norm on gradients. 
Just adding the square of the weights to the loss function is `),tr=a("em"),wo=s("not"),vo=s(` the correct way of using L2 regularization/weight decay with Adam, since that will interact with the m and v parameters in strange ways as shown in `),qe=a("a"),yo=s(`Decoupled Weight Decay Regularization`),bo=s("."),$o=l(),rr=a("p"),Ao=s(`Instead we want ot decay the weights in a manner that doesn\u2019t interact with the m/v parameters. This is equivalent to adding the square of the weights to the loss with plain (non-momentum) SGD.`),zo=l(),se=a("div"),u(Ue.$$.fragment),Eo=l(),ar=a("p"),xo=s("Creates an optimizer from its config with WarmUp custom object."),Cr=l(),O=a("div"),u(Ge.$$.fragment),To=l(),nr=a("p"),Do=s("Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."),Or=l(),R=a("h2"),ie=a("a"),or=a("span"),u(Ve.$$.fragment),Lo=l(),sr=a("span"),Po=s("Schedules"),Rr=l(),j=a("h3"),le=a("a"),ir=a("span"),u(Me.$$.fragment),Wo=l(),lr=a("span"),So=s("Learning Rate Schedules (Pytorch)"),jr=l(),q=a("div"),u(He.$$.fragment),Io=l(),cr=a("p"),ko=s("An enumeration."),qr=l(),U=a("div"),u(Be.$$.fragment),No=l(),mr=a("p"),Fo=s("Unified API to get any scheduler from its name."),Ur=l(),G=a("div"),u(Je.$$.fragment),Co=l(),pr=a("p"),Oo=s("Create a schedule with a constant learning rate, using the learning rate set in optimizer."),Gr=l(),V=a("div"),u(Ke.$$.fragment),Ro=l(),dr=a("p"),jo=s(`Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer.`),Vr=l(),Qe=a("img"),Mr=l(),M=a("div"),u(Xe.$$.fragment),qo=l(),hr=a("p"),Uo=s(`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the 
optimizer.`),Hr=l(),Ye=a("img"),Br=l(),H=a("div"),u(Ze.$$.fragment),Go=l(),ur=a("p"),Vo=s(`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.`),Jr=l(),et=a("img"),Kr=l(),B=a("div"),u(tt.$$.fragment),Mo=l(),fr=a("p"),Ho=s(`Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),Qr=l(),rt=a("img"),Xr=l(),P=a("div"),u(at.$$.fragment),Bo=l(),nt=a("p"),Jo=s(`Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by `),gr=a("em"),Ko=s("lr_end"),Qo=s(`, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),Xo=l(),ce=a("p"),Yo=s("Note: "),_r=a("em"),Zo=s("power"),es=s(` defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at `),ot=a("a"),ts=s("https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37"),Yr=l(),J=a("h3"),me=a("a"),wr=a("span"),u(st.$$.fragment),rs=l(),vr=a("span"),as=s("Warmup (TensorFlow)"),Zr=l(),K=a("div"),u(it.$$.fragment),ns=l(),yr=a("p"),os=s("Applies a warmup schedule on a given learning rate decay schedule."),ea=l(),Q=a("h2"),pe=a("a"),br=a("span"),u(lt.$$.fragment),ss=l(),$r=a("span"),is=s("Gradient Strategies"),ta=l(),X=a("h3"),de=a("a"),Ar=a("span"),u(ct.$$.fragment),ls=l(),zr=a("span"),cs=s("GradientAccumulator (TensorFlow)"),ra=l(),W=a("div"),u(mt.$$.fragment),ms=l(),Y=a("p"),ps=s(`Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a replica context. 
Gradients will be accumulated locally on each replica and without synchronization. Users should then call `),Er=a("code"),ds=s(".gradients"),hs=s(", scale the gradients if required, and pass the result to "),xr=a("code"),us=s("apply_gradients"),fs=s("."),gs=l(),he=a("div"),u(pt.$$.fragment),_s=l(),Tr=a("p"),ws=s("Resets the accumulated gradients on the current replica."),this.h()},l(e){const p=Xi('[data-svelte="svelte-1phssyn"]',document.head);S=n(p,"META",{name:!0,content:!0}),p.forEach(r),dt=c(e),E=n(e,"H1",{class:!0});var na=o(E);x=n(na,"A",{id:!0,class:!0,href:!0});var zs=o(x);bt=n(zs,"SPAN",{});var Es=o(bt);f(_e.$$.fragment,Es),Es.forEach(r),zs.forEach(r),Ca=c(na),$t=n(na,"SPAN",{});var xs=o($t);Oa=i(xs,"Optimization"),xs.forEach(r),na.forEach(r),Lr=c(e),ee=n(e,"P",{});var oa=o(ee);Ra=i(oa,"The "),At=n(oa,"CODE",{});var Ts=o(At);ja=i(Ts,".optimization"),Ts.forEach(r),qa=i(oa," module provides:"),oa.forEach(r),Pr=c(e),I=n(e,"UL",{});var ft=o(I);zt=n(ft,"LI",{});var Ds=o(zt);Ua=i(Ds,"an optimizer with weight decay fixed that can be used to fine-tuned models, and"),Ds.forEach(r),Ga=c(ft),we=n(ft,"LI",{});var sa=o(we);Va=i(sa,"several schedules in the form of schedule objects that inherit from "),Et=n(sa,"CODE",{});var Ls=o(Et);Ma=i(Ls,"_LRSchedule"),Ls.forEach(r),Ha=i(sa,":"),sa.forEach(r),Ba=c(ft),xt=n(ft,"LI",{});var Ps=o(xt);Ja=i(Ps,"a gradient accumulation class to accumulate the gradients of multiple batches"),Ps.forEach(r),ft.forEach(r),Wr=c(e),k=n(e,"H2",{class:!0});var ia=o(k);te=n(ia,"A",{id:!0,class:!0,href:!0});var Ws=o(te);Tt=n(Ws,"SPAN",{});var Ss=o(Tt);f(ve.$$.fragment,Ss),Ss.forEach(r),Ws.forEach(r),Ka=c(ia),Dt=n(ia,"SPAN",{});var Is=o(Dt);Qa=i(Is,"AdamW (PyTorch)"),Is.forEach(r),ia.forEach(r),Sr=c(e),T=n(e,"DIV",{class:!0});var gt=o(T);f(ye.$$.fragment,gt),Xa=c(gt),be=n(gt,"P",{});var la=o(be);Ya=i(la,"Implements Adam algorithm with weight decay fix as introduced in "),$e=n(la,"A",{href:!0,rel:!0});var ks=o($e);Za=i(ks,`Decoupled Weight Decay 
Regularization`),ks.forEach(r),en=i(la,"."),la.forEach(r),tn=c(gt),re=n(gt,"DIV",{class:!0});var ca=o(re);f(Ae.$$.fragment,ca),rn=c(ca),Lt=n(ca,"P",{});var Ns=o(Lt);an=i(Ns,"Performs a single optimization step."),Ns.forEach(r),ca.forEach(r),gt.forEach(r),Ir=c(e),N=n(e,"H2",{class:!0});var ma=o(N);ae=n(ma,"A",{id:!0,class:!0,href:!0});var Fs=o(ae);Pt=n(Fs,"SPAN",{});var Cs=o(Pt);f(ze.$$.fragment,Cs),Cs.forEach(r),Fs.forEach(r),nn=c(ma),Wt=n(ma,"SPAN",{});var Os=o(Wt);on=i(Os,"AdaFactor (PyTorch)"),Os.forEach(r),ma.forEach(r),kr=c(e),h=n(e,"DIV",{class:!0});var y=o(h);f(Ee.$$.fragment,y),sn=c(y),ht=n(y,"P",{});var vs=o(ht);ln=i(vs,`AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: `),xe=n(vs,"A",{href:!0,rel:!0});var Rs=o(xe);cn=i(Rs,"https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py"),Rs.forEach(r),vs.forEach(r),mn=c(y),b=n(y,"P",{});var A=o(b);pn=i(A,"Paper: "),St=n(A,"EM",{});var js=o(St);dn=i(js,"Adafactor: Adaptive Learning Rates with Sublinear Memory Cost"),js.forEach(r),hn=c(A),Te=n(A,"A",{href:!0,rel:!0});var qs=o(Te);un=i(qs,"https://arxiv.org/abs/1804.04235"),qs.forEach(r),fn=i(A,` Note that this optimizer internally adjusts the learning rate depending on the `),It=n(A,"CODE",{});var Us=o(It);gn=i(Us,"scale_parameter"),Us.forEach(r),_n=i(A,", "),kt=n(A,"CODE",{});var Gs=o(kt);wn=i(Gs,"relative_step"),Gs.forEach(r),vn=i(A,` and `),Nt=n(A,"CODE",{});var Vs=o(Nt);yn=i(Vs,"warmup_init"),Vs.forEach(r),bn=i(A," options. 
To use a manual (external) learning rate schedule you should set "),Ft=n(A,"CODE",{});var Ms=o(Ft);$n=i(Ms,"scale_parameter=False"),Ms.forEach(r),An=i(A,` and `),Ct=n(A,"CODE",{});var Hs=o(Ct);zn=i(Hs,"relative_step=False"),Hs.forEach(r),En=i(A,"."),A.forEach(r),xn=c(y),Ot=n(y,"P",{});var Bs=o(Ot);Tn=i(Bs,"This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested."),Bs.forEach(r),Dn=c(y),De=n(y,"P",{});var pa=o(De);Ln=i(pa,"Recommended T5 finetuning settings ("),Le=n(pa,"A",{href:!0,rel:!0});var Js=o(Le);Pn=i(Js,"https://discuss.huggingface.co/t/t5-finetuning-tips/684/3"),Js.forEach(r),Wn=i(pa,"):"),pa.forEach(r),Sn=c(y),D=n(y,"UL",{});var ue=o(D);Pe=n(ue,"LI",{});var da=o(Pe);Rt=n(da,"P",{});var Ks=o(Rt);In=i(Ks,"Training without LR warmup or clip_threshold is not recommended."),Ks.forEach(r),kn=c(da),We=n(da,"UL",{});var ha=o(We);jt=n(ha,"LI",{});var Qs=o(jt);Nn=i(Qs,"use scheduled LR warm-up to fixed LR"),Qs.forEach(r),Fn=c(ha),Se=n(ha,"LI",{});var ua=o(Se);Cn=i(ua,"use clip_threshold=1.0 ("),Ie=n(ua,"A",{href:!0,rel:!0});var Xs=o(Ie);On=i(Xs,"https://arxiv.org/abs/1804.04235"),Xs.forEach(r),Rn=i(ua,")"),ua.forEach(r),ha.forEach(r),da.forEach(r),jn=c(ue),qt=n(ue,"LI",{});var Ys=o(qt);Ut=n(Ys,"P",{});var Zs=o(Ut);qn=i(Zs,"Disable relative updates"),Zs.forEach(r),Ys.forEach(r),Un=c(ue),Gt=n(ue,"LI",{});var ei=o(Gt);Vt=n(ei,"P",{});var ti=o(Vt);Gn=i(ti,"Use scale_parameter=False"),ti.forEach(r),ei.forEach(r),Vn=c(ue),Mt=n(ue,"LI",{});var ri=o(Mt);Ht=n(ri,"P",{});var ai=o(Ht);Mn=i(ai,"Additional optimizer operations like gradient clipping should not be used alongside Adafactor"),ai.forEach(r),ri.forEach(r),ue.forEach(r),Hn=c(y),Bt=n(y,"P",{});var ni=o(Bt);Bn=i(ni,"Example:"),ni.forEach(r),Jn=c(y),f(ke.$$.fragment,y),Kn=c(y),Jt=n(y,"P",{});var oi=o(Jt);Qn=i(oi,"Others reported the following combination to work well:"),oi.forEach(r),Xn=c(y),f(Ne.$$.fragment,y),Yn=c(y),L=n(y,"P",{});var fe=o(L);Zn=i(fe,"When using 
"),Kt=n(fe,"CODE",{});var si=o(Kt);eo=i(si,"lr=None"),si.forEach(r),to=i(fe," with "),ut=n(fe,"A",{href:!0});var ii=o(ut);ro=i(ii,"Trainer"),ii.forEach(r),ao=i(fe," you will most likely need to use "),Qt=n(fe,"CODE",{});var li=o(Qt);no=i(li,"AdafactorSchedule"),li.forEach(r),oo=i(fe,"scheduler as following:"),fe.forEach(r),so=c(y),f(Fe.$$.fragment,y),io=c(y),Xt=n(y,"P",{});var ci=o(Xt);lo=i(ci,"Usage:"),ci.forEach(r),co=c(y),f(Ce.$$.fragment,y),mo=c(y),ne=n(y,"DIV",{class:!0});var fa=o(ne);f(Oe.$$.fragment,fa),po=c(fa),Yt=n(fa,"P",{});var mi=o(Yt);ho=i(mi,"Performs a single optimization step"),mi.forEach(r),fa.forEach(r),y.forEach(r),Nr=c(e),F=n(e,"H2",{class:!0});var ga=o(F);oe=n(ga,"A",{id:!0,class:!0,href:!0});var pi=o(oe);Zt=n(pi,"SPAN",{});var di=o(Zt);f(Re.$$.fragment,di),di.forEach(r),pi.forEach(r),uo=c(ga),er=n(ga,"SPAN",{});var hi=o(er);fo=i(hi,"AdamWeightDecay (TensorFlow)"),hi.forEach(r),ga.forEach(r),Fr=c(e),z=n(e,"DIV",{class:!0});var ge=o(z);f(je.$$.fragment,ge),go=c(ge),C=n(ge,"P",{});var _t=o(C);_o=i(_t,`Adam enables L2 weight decay and clip_by_global_norm on gradients. Just adding the square of the weights to the loss function is `),tr=n(_t,"EM",{});var ui=o(tr);wo=i(ui,"not"),ui.forEach(r),vo=i(_t,` the correct way of using L2 regularization/weight decay with Adam, since that will interact with the m and v parameters in strange ways as shown in `),qe=n(_t,"A",{href:!0,rel:!0});var fi=o(qe);yo=i(fi,`Decoupled Weight Decay Regularization`),fi.forEach(r),bo=i(_t,"."),_t.forEach(r),$o=c(ge),rr=n(ge,"P",{});var gi=o(rr);Ao=i(gi,`Instead we want ot decay the weights in a manner that doesn\u2019t interact with the m/v parameters. 
This is equivalent to adding the square of the weights to the loss with plain (non-momentum) SGD.`),gi.forEach(r),zo=c(ge),se=n(ge,"DIV",{class:!0});var _a=o(se);f(Ue.$$.fragment,_a),Eo=c(_a),ar=n(_a,"P",{});var _i=o(ar);xo=i(_i,"Creates an optimizer from its config with WarmUp custom object."),_i.forEach(r),_a.forEach(r),ge.forEach(r),Cr=c(e),O=n(e,"DIV",{class:!0});var wa=o(O);f(Ge.$$.fragment,wa),To=c(wa),nr=n(wa,"P",{});var wi=o(nr);Do=i(wi,"Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."),wi.forEach(r),wa.forEach(r),Or=c(e),R=n(e,"H2",{class:!0});var va=o(R);ie=n(va,"A",{id:!0,class:!0,href:!0});var vi=o(ie);or=n(vi,"SPAN",{});var yi=o(or);f(Ve.$$.fragment,yi),yi.forEach(r),vi.forEach(r),Lo=c(va),sr=n(va,"SPAN",{});var bi=o(sr);Po=i(bi,"Schedules"),bi.forEach(r),va.forEach(r),Rr=c(e),j=n(e,"H3",{class:!0});var ya=o(j);le=n(ya,"A",{id:!0,class:!0,href:!0});var $i=o(le);ir=n($i,"SPAN",{});var Ai=o(ir);f(Me.$$.fragment,Ai),Ai.forEach(r),$i.forEach(r),Wo=c(ya),lr=n(ya,"SPAN",{});var zi=o(lr);So=i(zi,"Learning Rate Schedules (Pytorch)"),zi.forEach(r),ya.forEach(r),jr=c(e),q=n(e,"DIV",{class:!0});var ba=o(q);f(He.$$.fragment,ba),Io=c(ba),cr=n(ba,"P",{});var Ei=o(cr);ko=i(Ei,"An enumeration."),Ei.forEach(r),ba.forEach(r),qr=c(e),U=n(e,"DIV",{class:!0});var $a=o(U);f(Be.$$.fragment,$a),No=c($a),mr=n($a,"P",{});var xi=o(mr);Fo=i(xi,"Unified API to get any scheduler from its name."),xi.forEach(r),$a.forEach(r),Ur=c(e),G=n(e,"DIV",{class:!0});var Aa=o(G);f(Je.$$.fragment,Aa),Co=c(Aa),pr=n(Aa,"P",{});var Ti=o(pr);Oo=i(Ti,"Create a schedule with a constant learning rate, using the learning rate set in optimizer."),Ti.forEach(r),Aa.forEach(r),Gr=c(e),V=n(e,"DIV",{class:!0});var za=o(V);f(Ke.$$.fragment,za),Ro=c(za),dr=n(za,"P",{});var Di=o(dr);jo=i(Di,`Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the 
optimizer.`),Di.forEach(r),za.forEach(r),Vr=c(e),Qe=n(e,"IMG",{alt:!0,src:!0}),Mr=c(e),M=n(e,"DIV",{class:!0});var Ea=o(M);f(Xe.$$.fragment,Ea),qo=c(Ea),hr=n(Ea,"P",{});var Li=o(hr);Uo=i(Li,`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.`),Li.forEach(r),Ea.forEach(r),Hr=c(e),Ye=n(e,"IMG",{alt:!0,src:!0}),Br=c(e),H=n(e,"DIV",{class:!0});var xa=o(H);f(Ze.$$.fragment,xa),Go=c(xa),ur=n(xa,"P",{});var Pi=o(ur);Vo=i(Pi,`Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.`),Pi.forEach(r),xa.forEach(r),Jr=c(e),et=n(e,"IMG",{alt:!0,src:!0}),Kr=c(e),B=n(e,"DIV",{class:!0});var Ta=o(B);f(tt.$$.fragment,Ta),Mo=c(Ta),fr=n(Ta,"P",{});var Wi=o(fr);Ho=i(Wi,`Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),Wi.forEach(r),Ta.forEach(r),Qr=c(e),rt=n(e,"IMG",{alt:!0,src:!0}),Xr=c(e),P=n(e,"DIV",{class:!0});var wt=o(P);f(at.$$.fragment,wt),Bo=c(wt),nt=n(wt,"P",{});var Da=o(nt);Jo=i(Da,`Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by `),gr=n(Da,"EM",{});var Si=o(gr);Ko=i(Si,"lr_end"),Si.forEach(r),Qo=i(Da,`, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.`),Da.forEach(r),Xo=c(wt),ce=n(wt,"P",{});var Dr=o(ce);Yo=i(Dr,"Note: "),_r=n(Dr,"EM",{});var Ii=o(_r);Zo=i(Ii,"power"),Ii.forEach(r),es=i(Dr,` defaults to 1.0 as in the fairseq implementation, 
which in turn is based on the original BERT implementation at `),ot=n(Dr,"A",{href:!0,rel:!0});var ki=o(ot);ts=i(ki,"https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37"),ki.forEach(r),Dr.forEach(r),wt.forEach(r),Yr=c(e),J=n(e,"H3",{class:!0});var La=o(J);me=n(La,"A",{id:!0,class:!0,href:!0});var Ni=o(me);wr=n(Ni,"SPAN",{});var Fi=o(wr);f(st.$$.fragment,Fi),Fi.forEach(r),Ni.forEach(r),rs=c(La),vr=n(La,"SPAN",{});var Ci=o(vr);as=i(Ci,"Warmup (TensorFlow)"),Ci.forEach(r),La.forEach(r),Zr=c(e),K=n(e,"DIV",{class:!0});var Pa=o(K);f(it.$$.fragment,Pa),ns=c(Pa),yr=n(Pa,"P",{});var Oi=o(yr);os=i(Oi,"Applies a warmup schedule on a given learning rate decay schedule."),Oi.forEach(r),Pa.forEach(r),ea=c(e),Q=n(e,"H2",{class:!0});var Wa=o(Q);pe=n(Wa,"A",{id:!0,class:!0,href:!0});var Ri=o(pe);br=n(Ri,"SPAN",{});var ji=o(br);f(lt.$$.fragment,ji),ji.forEach(r),Ri.forEach(r),ss=c(Wa),$r=n(Wa,"SPAN",{});var qi=o($r);is=i(qi,"Gradient Strategies"),qi.forEach(r),Wa.forEach(r),ta=c(e),X=n(e,"H3",{class:!0});var Sa=o(X);de=n(Sa,"A",{id:!0,class:!0,href:!0});var Ui=o(de);Ar=n(Ui,"SPAN",{});var Gi=o(Ar);f(ct.$$.fragment,Gi),Gi.forEach(r),Ui.forEach(r),ls=c(Sa),zr=n(Sa,"SPAN",{});var Vi=o(zr);cs=i(Vi,"GradientAccumulator (TensorFlow)"),Vi.forEach(r),Sa.forEach(r),ra=c(e),W=n(e,"DIV",{class:!0});var vt=o(W);f(mt.$$.fragment,vt),ms=c(vt),Y=n(vt,"P",{});var yt=o(Y);ps=i(yt,`Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a replica context. Gradients will be accumulated locally on each replica and without synchronization. 
Users should then call `),Er=n(yt,"CODE",{});var Mi=o(Er);ds=i(Mi,".gradients"),Mi.forEach(r),hs=i(yt,", scale the gradients if required, and pass the result to "),xr=n(yt,"CODE",{});var Hi=o(xr);us=i(Hi,"apply_gradients"),Hi.forEach(r),fs=i(yt,"."),yt.forEach(r),gs=c(vt),he=n(vt,"DIV",{class:!0});var Ia=o(he);f(pt.$$.fragment,Ia),_s=c(Ia),Tr=n(Ia,"P",{});var Bi=o(Tr);ws=i(Bi,"Resets the accumulated gradients on the current replica."),Bi.forEach(r),Ia.forEach(r),vt.forEach(r),this.h()},h(){m(S,"name","hf:doc:metadata"),m(S,"content",JSON.stringify(el)),m(x,"id","optimization"),m(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(x,"href","#optimization"),m(E,"class","relative group"),m(te,"id","transformers.AdamW"),m(te,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(te,"href","#transformers.AdamW"),m(k,"class","relative group"),m($e,"href","https://arxiv.org/abs/1711.05101"),m($e,"rel","nofollow"),m(re,"class","docstring"),m(T,"class","docstring"),m(ae,"id","transformers.Adafactor"),m(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ae,"href","#transformers.Adafactor"),m(N,"class","relative 
group"),m(xe,"href","https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py"),m(xe,"rel","nofollow"),m(Te,"href","https://arxiv.org/abs/1804.04235"),m(Te,"rel","nofollow"),m(Le,"href","https://discuss.huggingface.co/t/t5-finetuning-tips/684/3"),m(Le,"rel","nofollow"),m(Ie,"href","https://arxiv.org/abs/1804.04235"),m(Ie,"rel","nofollow"),m(ut,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(ne,"class","docstring"),m(h,"class","docstring"),m(oe,"id","transformers.AdamWeightDecay"),m(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(oe,"href","#transformers.AdamWeightDecay"),m(F,"class","relative group"),m(qe,"href","https://arxiv.org/abs/1711.05101"),m(qe,"rel","nofollow"),m(se,"class","docstring"),m(z,"class","docstring"),m(O,"class","docstring"),m(ie,"id","schedules"),m(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ie,"href","#schedules"),m(R,"class","relative group"),m(le,"id","transformers.SchedulerType"),m(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(le,"href","#transformers.SchedulerType"),m(j,"class","relative 
group"),m(q,"class","docstring"),m(U,"class","docstring"),m(G,"class","docstring"),m(V,"class","docstring"),m(Qe,"alt",""),ka(Qe.src,ys="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_constant_schedule.png")||m(Qe,"src",ys),m(M,"class","docstring"),m(Ye,"alt",""),ka(Ye.src,bs="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_schedule.png")||m(Ye,"src",bs),m(H,"class","docstring"),m(et,"alt",""),ka(et.src,$s="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_hard_restarts_schedule.png")||m(et,"src",$s),m(B,"class","docstring"),m(rt,"alt",""),ka(rt.src,As="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_linear_schedule.png")||m(rt,"src",As),m(ot,"href","https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37"),m(ot,"rel","nofollow"),m(P,"class","docstring"),m(me,"id","transformers.WarmUp"),m(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(me,"href","#transformers.WarmUp"),m(J,"class","relative group"),m(K,"class","docstring"),m(pe,"id","gradient-strategies"),m(pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(pe,"href","#gradient-strategies"),m(Q,"class","relative group"),m(de,"id","transformers.GradientAccumulator"),m(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(de,"href","#transformers.GradientAccumulator"),m(X,"class","relative 
group"),m(he,"class","docstring"),m(W,"class","docstring")},m(e,p){t(document.head,S),d(e,dt,p),d(e,E,p),t(E,x),t(x,bt),g(_e,bt,null),t(E,Ca),t(E,$t),t($t,Oa),d(e,Lr,p),d(e,ee,p),t(ee,Ra),t(ee,At),t(At,ja),t(ee,qa),d(e,Pr,p),d(e,I,p),t(I,zt),t(zt,Ua),t(I,Ga),t(I,we),t(we,Va),t(we,Et),t(Et,Ma),t(we,Ha),t(I,Ba),t(I,xt),t(xt,Ja),d(e,Wr,p),d(e,k,p),t(k,te),t(te,Tt),g(ve,Tt,null),t(k,Ka),t(k,Dt),t(Dt,Qa),d(e,Sr,p),d(e,T,p),g(ye,T,null),t(T,Xa),t(T,be),t(be,Ya),t(be,$e),t($e,Za),t(be,en),t(T,tn),t(T,re),g(Ae,re,null),t(re,rn),t(re,Lt),t(Lt,an),d(e,Ir,p),d(e,N,p),t(N,ae),t(ae,Pt),g(ze,Pt,null),t(N,nn),t(N,Wt),t(Wt,on),d(e,kr,p),d(e,h,p),g(Ee,h,null),t(h,sn),t(h,ht),t(ht,ln),t(ht,xe),t(xe,cn),t(h,mn),t(h,b),t(b,pn),t(b,St),t(St,dn),t(b,hn),t(b,Te),t(Te,un),t(b,fn),t(b,It),t(It,gn),t(b,_n),t(b,kt),t(kt,wn),t(b,vn),t(b,Nt),t(Nt,yn),t(b,bn),t(b,Ft),t(Ft,$n),t(b,An),t(b,Ct),t(Ct,zn),t(b,En),t(h,xn),t(h,Ot),t(Ot,Tn),t(h,Dn),t(h,De),t(De,Ln),t(De,Le),t(Le,Pn),t(De,Wn),t(h,Sn),t(h,D),t(D,Pe),t(Pe,Rt),t(Rt,In),t(Pe,kn),t(Pe,We),t(We,jt),t(jt,Nn),t(We,Fn),t(We,Se),t(Se,Cn),t(Se,Ie),t(Ie,On),t(Se,Rn),t(D,jn),t(D,qt),t(qt,Ut),t(Ut,qn),t(D,Un),t(D,Gt),t(Gt,Vt),t(Vt,Gn),t(D,Vn),t(D,Mt),t(Mt,Ht),t(Ht,Mn),t(h,Hn),t(h,Bt),t(Bt,Bn),t(h,Jn),g(ke,h,null),t(h,Kn),t(h,Jt),t(Jt,Qn),t(h,Xn),g(Ne,h,null),t(h,Yn),t(h,L),t(L,Zn),t(L,Kt),t(Kt,eo),t(L,to),t(L,ut),t(ut,ro),t(L,ao),t(L,Qt),t(Qt,no),t(L,oo),t(h,so),g(Fe,h,null),t(h,io),t(h,Xt),t(Xt,lo),t(h,co),g(Ce,h,null),t(h,mo),t(h,ne),g(Oe,ne,null),t(ne,po),t(ne,Yt),t(Yt,ho),d(e,Nr,p),d(e,F,p),t(F,oe),t(oe,Zt),g(Re,Zt,null),t(F,uo),t(F,er),t(er,fo),d(e,Fr,p),d(e,z,p),g(je,z,null),t(z,go),t(z,C),t(C,_o),t(C,tr),t(tr,wo),t(C,vo),t(C,qe),t(qe,yo),t(C,bo),t(z,$o),t(z,rr),t(rr,Ao),t(z,zo),t(z,se),g(Ue,se,null),t(se,Eo),t(se,ar),t(ar,xo),d(e,Cr,p),d(e,O,p),g(Ge,O,null),t(O,To),t(O,nr),t(nr,Do),d(e,Or,p),d(e,R,p),t(R,ie),t(ie,or),g(Ve,or,null),t(R,Lo),t(R,sr),t(sr,Po),d(e,Rr,p),d(e,j,p),t(j,le),t(le,ir),g(Me,ir,null),t(j,Wo),t(j,lr),t(lr,So),d(e,jr,p),d(e,q
,p),g(He,q,null),t(q,Io),t(q,cr),t(cr,ko),d(e,qr,p),d(e,U,p),g(Be,U,null),t(U,No),t(U,mr),t(mr,Fo),d(e,Ur,p),d(e,G,p),g(Je,G,null),t(G,Co),t(G,pr),t(pr,Oo),d(e,Gr,p),d(e,V,p),g(Ke,V,null),t(V,Ro),t(V,dr),t(dr,jo),d(e,Vr,p),d(e,Qe,p),d(e,Mr,p),d(e,M,p),g(Xe,M,null),t(M,qo),t(M,hr),t(hr,Uo),d(e,Hr,p),d(e,Ye,p),d(e,Br,p),d(e,H,p),g(Ze,H,null),t(H,Go),t(H,ur),t(ur,Vo),d(e,Jr,p),d(e,et,p),d(e,Kr,p),d(e,B,p),g(tt,B,null),t(B,Mo),t(B,fr),t(fr,Ho),d(e,Qr,p),d(e,rt,p),d(e,Xr,p),d(e,P,p),g(at,P,null),t(P,Bo),t(P,nt),t(nt,Jo),t(nt,gr),t(gr,Ko),t(nt,Qo),t(P,Xo),t(P,ce),t(ce,Yo),t(ce,_r),t(_r,Zo),t(ce,es),t(ce,ot),t(ot,ts),d(e,Yr,p),d(e,J,p),t(J,me),t(me,wr),g(st,wr,null),t(J,rs),t(J,vr),t(vr,as),d(e,Zr,p),d(e,K,p),g(it,K,null),t(K,ns),t(K,yr),t(yr,os),d(e,ea,p),d(e,Q,p),t(Q,pe),t(pe,br),g(lt,br,null),t(Q,ss),t(Q,$r),t($r,is),d(e,ta,p),d(e,X,p),t(X,de),t(de,Ar),g(ct,Ar,null),t(X,ls),t(X,zr),t(zr,cs),d(e,ra,p),d(e,W,p),g(mt,W,null),t(W,ms),t(W,Y),t(Y,ps),t(Y,Er),t(Er,ds),t(Y,hs),t(Y,xr),t(xr,us),t(Y,fs),t(W,gs),t(W,he),g(pt,he,null),t(he,_s),t(he,Tr),t(Tr,ws),aa=!0},p:Yi,i(e){aa||(_(_e.$$.fragment,e),_(ve.$$.fragment,e),_(ye.$$.fragment,e),_(Ae.$$.fragment,e),_(ze.$$.fragment,e),_(Ee.$$.fragment,e),_(ke.$$.fragment,e),_(Ne.$$.fragment,e),_(Fe.$$.fragment,e),_(Ce.$$.fragment,e),_(Oe.$$.fragment,e),_(Re.$$.fragment,e),_(je.$$.fragment,e),_(Ue.$$.fragment,e),_(Ge.$$.fragment,e),_(Ve.$$.fragment,e),_(Me.$$.fragment,e),_(He.$$.fragment,e),_(Be.$$.fragment,e),_(Je.$$.fragment,e),_(Ke.$$.fragment,e),_(Xe.$$.fragment,e),_(Ze.$$.fragment,e),_(tt.$$.fragment,e),_(at.$$.fragment,e),_(st.$$.fragment,e),_(it.$$.fragment,e),_(lt.$$.fragment,e),_(ct.$$.fragment,e),_(mt.$$.fragment,e),_(pt.$$.fragment,e),aa=!0)},o(e){w(_e.$$.fragment,e),w(ve.$$.fragment,e),w(ye.$$.fragment,e),w(Ae.$$.fragment,e),w(ze.$$.fragment,e),w(Ee.$$.fragment,e),w(ke.$$.fragment,e),w(Ne.$$.fragment,e),w(Fe.$$.fragment,e),w(Ce.$$.fragment,e),w(Oe.$$.fragment,e),w(Re.$$.fragment,e),w(je.$$.fragment,e),w(Ue.$$.fragment,e),w(G
e.$$.fragment,e),w(Ve.$$.fragment,e),w(Me.$$.fragment,e),w(He.$$.fragment,e),w(Be.$$.fragment,e),w(Je.$$.fragment,e),w(Ke.$$.fragment,e),w(Xe.$$.fragment,e),w(Ze.$$.fragment,e),w(tt.$$.fragment,e),w(at.$$.fragment,e),w(st.$$.fragment,e),w(it.$$.fragment,e),w(lt.$$.fragment,e),w(ct.$$.fragment,e),w(mt.$$.fragment,e),w(pt.$$.fragment,e),aa=!1},d(e){r(S),e&&r(dt),e&&r(E),v(_e),e&&r(Lr),e&&r(ee),e&&r(Pr),e&&r(I),e&&r(Wr),e&&r(k),v(ve),e&&r(Sr),e&&r(T),v(ye),v(Ae),e&&r(Ir),e&&r(N),v(ze),e&&r(kr),e&&r(h),v(Ee),v(ke),v(Ne),v(Fe),v(Ce),v(Oe),e&&r(Nr),e&&r(F),v(Re),e&&r(Fr),e&&r(z),v(je),v(Ue),e&&r(Cr),e&&r(O),v(Ge),e&&r(Or),e&&r(R),v(Ve),e&&r(Rr),e&&r(j),v(Me),e&&r(jr),e&&r(q),v(He),e&&r(qr),e&&r(U),v(Be),e&&r(Ur),e&&r(G),v(Je),e&&r(Gr),e&&r(V),v(Ke),e&&r(Vr),e&&r(Qe),e&&r(Mr),e&&r(M),v(Xe),e&&r(Hr),e&&r(Ye),e&&r(Br),e&&r(H),v(Ze),e&&r(Jr),e&&r(et),e&&r(Kr),e&&r(B),v(tt),e&&r(Qr),e&&r(rt),e&&r(Xr),e&&r(P),v(at),e&&r(Yr),e&&r(J),v(st),e&&r(Zr),e&&r(K),v(it),e&&r(ea),e&&r(Q),v(lt),e&&r(ta),e&&r(X),v(ct),e&&r(ra),e&&r(W),v(mt),v(pt)}}}const el={local:"optimization",sections:[{local:"transformers.AdamW",title:"AdamW (PyTorch)"},{local:"transformers.Adafactor",title:"AdaFactor (PyTorch)"},{local:"transformers.AdamWeightDecay",title:"AdamWeightDecay (TensorFlow)"},{local:"schedules",sections:[{local:"transformers.SchedulerType",title:"Learning Rate Schedules (Pytorch)"},{local:"transformers.WarmUp",title:"Warmup (TensorFlow)"}],title:"Schedules"},{local:"gradient-strategies",sections:[{local:"transformers.GradientAccumulator",title:"GradientAccumulator (TensorFlow)"}],title:"Gradient Strategies"}],title:"Optimization"};function tl(Fa,S,dt){let{fw:E}=S;return Fa.$$set=x=>{"fw"in x&&dt(0,E=x.fw)},[E]}class il extends Ji{constructor(S){super();Ki(this,S,tl,Zi,Qi,{fw:0})}}export{il as default,el as metadata};
404
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/main_classes/processors.mdx-9cb76e12.js
import{S as xc,i as Ec,s as wc,e as s,k as i,w as h,t as n,M as yc,c as a,d as r,m as p,a as o,x as u,h as l,b as f,F as t,g as d,y as _,q as g,o as v,B as $}from"../../chunks/vendor-4833417e.js";import{T as wi}from"../../chunks/Tip-fffd6df1.js";import{D as y}from"../../chunks/Docstring-4f315ed9.js";import{C as As}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as Oe}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function Pc(B){let m,k,b,x,N,q,Q,M;return{c(){m=s("p"),k=n(`This class method is simply calling the feature extractor `),b=s("a"),x=n("from_pretrained()"),N=n(` and the tokenizer `),q=s("code"),Q=n("from_pretrained"),M=n(` methods. Please refer to the docstrings of the methods above for more information.`),this.h()},l(L){m=a(L,"P",{});var E=o(m);k=l(E,`This class method is simply calling the feature extractor `),b=a(E,"A",{href:!0});var V=o(b);x=l(V,"from_pretrained()"),V.forEach(r),N=l(E,` and the tokenizer `),q=a(E,"CODE",{});var K=o(q);Q=l(K,"from_pretrained"),K.forEach(r),M=l(E,` methods. Please refer to the docstrings of the methods above for more information.`),E.forEach(r),this.h()},h(){f(b,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained")},m(L,E){d(L,m,E),t(m,k),t(m,b),t(b,x),t(m,N),t(m,q),t(q,Q),t(m,M)},d(L){L&&r(m)}}}function qc(B){let m,k;return{c(){m=s("p"),k=n("This API is experimental and may have some slight breaking changes in the next releases.")},l(b){m=a(b,"P",{});var x=o(m);k=l(x,"This API is experimental and may have some slight breaking changes in the next releases."),x.forEach(r)},m(b,x){d(b,m,x),t(m,k)},d(b){b&&r(m)}}}function kc(B){let m,k,b,x,N,q,Q,M;return{c(){m=s("p"),k=n("This class method is simply calling "),b=s("a"),x=n("save_pretrained()"),N=n(` and `),q=s("code"),Q=n("save_pretrained"),M=n(`. 
Please refer to the docstrings of the methods above for more information.`),this.h()},l(L){m=a(L,"P",{});var E=o(m);k=l(E,"This class method is simply calling "),b=a(E,"A",{href:!0});var V=o(b);x=l(V,"save_pretrained()"),V.forEach(r),N=l(E,` and `),q=a(E,"CODE",{});var K=o(q);Q=l(K,"save_pretrained"),K.forEach(r),M=l(E,`. Please refer to the docstrings of the methods above for more information.`),E.forEach(r),this.h()},h(){f(b,"href","/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained")},m(L,E){d(L,m,E),t(m,k),t(m,b),t(b,x),t(m,N),t(m,q),t(q,Q),t(m,M)},d(L){L&&r(m)}}}function Ic(B){let m,k,b,x,N,q,Q,M,L,E,V,K,Ls,ie,Y,Za,Ut,eo,to,Ft,ro,so,ao,_r,oo,Ds,Z,pe,gr,Qe,no,vr,lo,Ts,Ht,io,Ns,Gt,po,Ms,S,Ue,co,$r,fo,mo,G,Fe,ho,br,uo,_o,ce,go,z,He,vo,Ge,$o,xr,bo,xo,Eo,Er,wo,yo,Re,Po,R,We,qo,Xe,ko,wr,Io,So,Ao,fe,Lo,W,Je,Do,Be,To,Rt,No,Mo,jo,de,js,ee,me,yr,Ke,Vo,Pr,zo,Vs,T,Co,Wt,Oo,Qo,Xt,Uo,Fo,Jt,Ho,Go,Bt,Ro,Wo,zs,P,Ye,Xo,qr,Jo,Bo,he,Ze,Ko,et,Yo,Kt,Zo,en,tn,ue,tt,rn,kr,sn,an,_e,rt,on,Ir,nn,ln,ge,st,pn,at,cn,Yt,fn,dn,mn,ve,ot,hn,nt,un,Zt,_n,gn,vn,$e,lt,$n,Sr,bn,Cs,U,it,xn,Ar,En,wn,be,pt,yn,Lr,Pn,Os,F,ct,qn,Dr,kn,In,xe,ft,Sn,Tr,An,Qs,te,Ee,Nr,dt,Ln,Mr,Dn,Us,we,mt,Tn,Nn,ht,Mn,Fs,er,jn,Hs,tr,Vn,Gs,w,jr,Vr,zn,Cn,zr,Cr,On,Qn,Or,Qr,Un,Fn,Ur,Fr,Hn,Gn,Hr,Gr,Rn,Wn,Rr,Wr,Xn,Jn,Xr,Jr,Bn,Kn,Br,Kr,Yn,Zn,Yr,Zr,el,Rs,ye,tl,rr,rl,sl,Ws,re,ut,al,sr,ol,es,nl,Xs,se,Pe,ts,_t,ll,rs,il,Js,ae,gt,pl,cl,vt,ss,fl,dl,Bs,$t,ml,bt,hl,Ks,ar,ul,Ys,or,as,os,_l,Zs,nr,gl,ea,qe,vl,xt,$l,bl,ta,oe,ke,ns,Et,xl,ls,El,ra,H,wt,wl,yl,yt,Pl,ql,Pt,kl,Il,sa,lr,Sl,aa,ne,Ie,is,qt,Al,ps,Ll,oa,ir,Dl,na,Se,cs,fs,Tl,Nl,ds,ms,Ml,la,kt,jl,hs,Vl,ia,D,It,zl,us,Cl,Ol,Ae,St,Ql,_s,Ul,Fl,C,At,Hl,Lt,Gl,gs,Rl,Wl,Xl,vs,Jl,Bl,Dt,Kl,Le,Tt,Yl,$s,Zl,pa,De,ei,bs,ti,ri,ca,j,Nt,si,xs,ai,oi,Es,ni,li,Mt,fa,Te,ii,ws,pi,ci,da,le,Ne,ys,jt,fi,Ps,di,ma,pr,mi,ha,Vt,ua,Me,hi,qs,ui,_i,_a,zt,ga,je,gi,Ct,vi,$i,va;return q=new Oe({}),Qe=new Oe({}),Ue=new y({props:{name:"class 
transformers.ProcessorMixin",anchor:"transformers.ProcessorMixin",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L44"}}),Fe=new y({props:{name:"from_pretrained",anchor:"transformers.ProcessorMixin.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L157",parametersDescription:[{anchor:"transformers.ProcessorMixin.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. 
**kwargs &#x2014; Additional keyword arguments passed along to both <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> and <code>from_pretrained</code>.</li> </ul>`,name:"pretrained_model_name_or_path"}]}}),ce=new wi({props:{$$slots:{default:[Pc]},$$scope:{ctx:B}}}),He=new y({props:{name:"push_to_hub",anchor:"transformers.file_utils.PushToHubMixin.push_to_hub",parameters:[{name:"repo_path_or_name",val:": typing.Optional[str] = None"},{name:"repo_url",val:": typing.Optional[str] = None"},{name:"use_temp_dir",val:": bool = False"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"organization",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"**model_card_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842",parametersDescription:[{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name",description:`<strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your processor in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.`,name:"repo_path_or_name"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_url",description:`<strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. 
If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.`,name:"repo_url"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.`,name:"use_temp_dir"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;add processor&quot;</code>.`,name:"commit_message"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.organization",description:`<strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your processor (you must be a member of this organization).`,name:"organization"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). 
Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"}],returnDescription:` <p>The url of the commit of your processor in the given repository.</p> `,returnType:` <p><code>str</code></p> `}}),Re=new As({props:{code:`from transformers import AutoProcessor processor = AutoProcessor.from_pretrained("bert-base-cased") # Push the processor to your namespace with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. processor.push_to_hub("my-finetuned-bert") # Push the processor to your namespace with the name "my-finetuned-bert" with no local clone. processor.push_to_hub("my-finetuned-bert", use_temp_dir=True) # Push the processor to an organization with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. processor.push_to_hub("my-finetuned-bert", organization="huggingface") # Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*. processor.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoProcessor processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the processor to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> processor.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the processor to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> processor.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the processor to an organization with the name 
&quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> processor.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> processor.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)`}}),We=new y({props:{name:"register_for_auto_class",anchor:"transformers.ProcessorMixin.register_for_auto_class",parameters:[{name:"auto_class",val:" = 'AutoProcessor'"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L190",parametersDescription:[{anchor:"transformers.ProcessorMixin.register_for_auto_class.auto_class",description:`<strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoProcessor&quot;</code>) &#x2014; The auto class to register this new feature extractor with.`,name:"auto_class"}]}}),fe=new wi({props:{warning:"&lcub;true}",$$slots:{default:[qc]},$$scope:{ctx:B}}}),Je=new y({props:{name:"save_pretrained",anchor:"transformers.ProcessorMixin.save_pretrained",parameters:[{name:"save_directory",val:""},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L95",parametersDescription:[{anchor:"transformers.ProcessorMixin.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not 
exist).`,name:"save_directory"},{anchor:"transformers.ProcessorMixin.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your processor to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.`,name:"push_to_hub"}]}}),de=new wi({props:{$$slots:{default:[kc]},$$scope:{ctx:B}}}),Ke=new Oe({}),Ye=new y({props:{name:"class transformers.DataProcessor",anchor:"transformers.DataProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L81"}}),Ze=new y({props:{name:"get_dev_examples",anchor:"transformers.DataProcessor.get_dev_examples",parameters:[{name:"data_dir",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L98"}}),tt=new y({props:{name:"get_example_from_tensor_dict",anchor:"transformers.DataProcessor.get_example_from_tensor_dict",parameters:[{name:"tensor_dict",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L84"}}),rt=new 
y({props:{name:"get_labels",anchor:"transformers.DataProcessor.get_labels",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L106"}}),st=new y({props:{name:"get_test_examples",anchor:"transformers.DataProcessor.get_test_examples",parameters:[{name:"data_dir",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L102"}}),ot=new y({props:{name:"get_train_examples",anchor:"transformers.DataProcessor.get_train_examples",parameters:[{name:"data_dir",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L94"}}),lt=new y({props:{name:"tfds_map",anchor:"transformers.DataProcessor.tfds_map",parameters:[{name:"example",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L110"}}),it=new y({props:{name:"class transformers.InputExample",anchor:"transformers.InputExample",parameters:[{name:"guid",val:": str"},{name:"text_a",val:": str"},{name:"text_b",val:": typing.Optional[str] = None"},{name:"label",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L31"}}),pt=new y({props:{name:"to_json_string",anchor:"transformers.InputExample.to_json_string",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L50"}}),ct=new y({props:{name:"class transformers.InputFeatures",anchor:"transformers.InputFeatures",parameters:[{name:"input_ids",val:": typing.List[int]"},{name:"attention_mask",val:": typing.Optional[typing.List[int]] = None"},{name:"token_type_ids",val:": typing.Optional[typing.List[int]] = None"},{name:"label",val:": typing.Union[int, float, NoneType] = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L56"}}),ft=new y({props:{name:"to_json_string",anchor:"transformers.InputFeatures.to_json_string",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L76"}}),dt=new Oe({}),ut=new y({props:{name:"transformers.glue_convert_examples_to_features",anchor:"transformers.glue_convert_examples_to_features",parameters:[{name:"examples",val:": typing.Union[typing.List[transformers.data.processors.utils.InputExample], ForwardRef('tf.data.Dataset')]"},{name:"tokenizer",val:": PreTrainedTokenizer"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"task",val:" = None"},{name:"label_list",val:" = None"},{name:"output_mode",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/glue.py#L42",returnDescription:` <p>If the <code>examples</code> input is a <code>tf.data.Dataset</code>, will return a <code>tf.data.Dataset</code> containing the task-specific features. 
If the input is a list of <code>InputExamples</code>, will return a list of task-specific <code>InputFeatures</code> which can be fed to the model.</p> `}}),_t=new Oe({}),Et=new Oe({}),qt=new Oe({}),It=new y({props:{name:"class transformers.data.processors.squad.SquadProcessor",anchor:"transformers.data.processors.squad.SquadProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/squad.py#L543"}}),St=new y({props:{name:"get_dev_examples",anchor:"transformers.data.processors.squad.SquadProcessor.get_dev_examples",parameters:[{name:"data_dir",val:""},{name:"filename",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/squad.py#L631"}}),At=new y({props:{name:"get_examples_from_dataset",anchor:"transformers.data.processors.squad.SquadProcessor.get_examples_from_dataset",parameters:[{name:"dataset",val:""},{name:"evaluate",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/squad.py#L576",returnDescription:` <p>List of SquadExample</p> `}}),Dt=new As({props:{code:`import tensorflow_datasets as tfds dataset = tfds.load("squad") training_examples = get_examples_from_dataset(dataset, evaluate=False) evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow_datasets <span class="hljs-keyword">as</span> tfds <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = tfds.load(<span class="hljs-string">&quot;squad&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>training_examples = get_examples_from_dataset(dataset, evaluate=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>evaluation_examples = get_examples_from_dataset(dataset, evaluate=<span class="hljs-literal">True</span>)`}}),Tt=new 
y({props:{name:"get_train_examples",anchor:"transformers.data.processors.squad.SquadProcessor.get_train_examples",parameters:[{name:"data_dir",val:""},{name:"filename",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/squad.py#L609"}}),Nt=new y({props:{name:"transformers.squad_convert_examples_to_features",anchor:"transformers.squad_convert_examples_to_features",parameters:[{name:"examples",val:""},{name:"tokenizer",val:""},{name:"max_seq_length",val:""},{name:"doc_stride",val:""},{name:"max_query_length",val:""},{name:"is_training",val:""},{name:"padding_strategy",val:" = 'max_length'"},{name:"return_dataset",val:" = False"},{name:"threads",val:" = 1"},{name:"tqdm_enabled",val:" = True"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/squad.py#L318"}}),Mt=new As({props:{code:`processor = SquadV2Processor() examples = processor.get_dev_examples(data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, )`,highlighted:`processor = SquadV2Processor() examples = processor.get_dev_examples(data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=<span class="hljs-keyword">not</span> evaluate, )`}}),jt=new Oe({}),Vt=new As({props:{code:`# Loading a V2 processor processor = SquadV2Processor() examples = processor.get_dev_examples(squad_v2_data_dir) # Loading a V1 processor processor = SquadV1Processor() examples = processor.get_dev_examples(squad_v1_data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, 
is_training=not evaluate, )`,highlighted:`<span class="hljs-comment"># Loading a V2 processor</span> processor = SquadV2Processor() examples = processor.get_dev_examples(squad_v2_data_dir) <span class="hljs-comment"># Loading a V1 processor</span> processor = SquadV1Processor() examples = processor.get_dev_examples(squad_v1_data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=<span class="hljs-keyword">not</span> evaluate, )`}}),zt=new As({props:{code:`# tensorflow_datasets only handle Squad V1. tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=not evaluate, )`,highlighted:`<span class="hljs-comment"># tensorflow_datasets only handle Squad V1.</span> tfds_examples = tfds.load(<span class="hljs-string">&quot;squad&quot;</span>) examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=<span class="hljs-keyword">not</span> evaluate, )`}}),{c(){m=s("meta"),k=i(),b=s("h1"),x=s("a"),N=s("span"),h(q.$$.fragment),Q=i(),M=s("span"),L=n("Processors"),E=i(),V=s("p"),K=n("Processors can mean two different things in the Transformers library:"),Ls=i(),ie=s("ul"),Y=s("li"),Za=n("the objects that pre-process inputs for multi-modal models such as "),Ut=s("a"),eo=n("Wav2Vec2"),to=n(` (speech and text) or `),Ft=s("a"),ro=n("CLIP"),so=n(" (text and vision)"),ao=i(),_r=s("li"),oo=n("deprecated objects that were used in older versions of the library to 
preprocess data for GLUE or SQUAD."),Ds=i(),Z=s("h2"),pe=s("a"),gr=s("span"),h(Qe.$$.fragment),no=i(),vr=s("span"),lo=n("Multi-modal processors"),Ts=i(),Ht=s("p"),io=n(`Any multi-modal model will require an object to encode or decode the data that groups several modalities (among text, vision and audio). This is handled by objects called processors, which group tokenizers (for the text modality) and feature extractors (for vision and audio).`),Ns=i(),Gt=s("p"),po=n("Those processors inherit from the following base class that implements the saving and loading functionality:"),Ms=i(),S=s("div"),h(Ue.$$.fragment),co=i(),$r=s("p"),fo=n("This is a mixin used to provide saving/loading functionality for all processor classes."),mo=i(),G=s("div"),h(Fe.$$.fragment),ho=i(),br=s("p"),uo=n("Instantiate a processor associated with a pretrained model."),_o=i(),h(ce.$$.fragment),go=i(),z=s("div"),h(He.$$.fragment),vo=i(),Ge=s("p"),$o=n(`Upload the processor files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),xr=s("code"),bo=n("repo_path_or_name"),xo=n("."),Eo=i(),Er=s("p"),wo=n("Examples:"),yo=i(),h(Re.$$.fragment),Po=i(),R=s("div"),h(We.$$.fragment),qo=i(),Xe=s("p"),ko=n(`Register this class with a given auto class. This should only be used for custom feature extractors as the ones in the library are already mapped with `),wr=s("code"),Io=n("AutoProcessor"),So=n("."),Ao=i(),h(fe.$$.fragment),Lo=i(),W=s("div"),h(Je.$$.fragment),Do=i(),Be=s("p"),To=n(`Saves the attributes of this processor (feature extractor, tokenizer\u2026) in the specified directory so that it can be reloaded using the `),Rt=s("a"),No=n("from_pretrained()"),Mo=n(" method."),jo=i(),h(de.$$.fragment),js=i(),ee=s("h2"),me=s("a"),yr=s("span"),h(Ke.$$.fragment),Vo=i(),Pr=s("span"),zo=n("Deprecated processors"),Vs=i(),T=s("p"),Co=n(`All processors follow the same architecture which is that of the `),Wt=s("a"),Oo=n("DataProcessor"),Qo=n(`. 
The processor returns a list of `),Xt=s("a"),Uo=n("InputExample"),Fo=n(`. These `),Jt=s("a"),Ho=n("InputExample"),Go=n(` can be converted to `),Bt=s("a"),Ro=n("InputFeatures"),Wo=n(" in order to be fed to the model."),zs=i(),P=s("div"),h(Ye.$$.fragment),Xo=i(),qr=s("p"),Jo=n("Base class for data converters for sequence classification data sets."),Bo=i(),he=s("div"),h(Ze.$$.fragment),Ko=i(),et=s("p"),Yo=n("Gets a collection of "),Kt=s("a"),Zo=n("InputExample"),en=n(" for the dev set."),tn=i(),ue=s("div"),h(tt.$$.fragment),rn=i(),kr=s("p"),sn=n("Gets an example from a dict with tensorflow tensors."),an=i(),_e=s("div"),h(rt.$$.fragment),on=i(),Ir=s("p"),nn=n("Gets the list of labels for this data set."),ln=i(),ge=s("div"),h(st.$$.fragment),pn=i(),at=s("p"),cn=n("Gets a collection of "),Yt=s("a"),fn=n("InputExample"),dn=n(" for the test set."),mn=i(),ve=s("div"),h(ot.$$.fragment),hn=i(),nt=s("p"),un=n("Gets a collection of "),Zt=s("a"),_n=n("InputExample"),gn=n(" for the train set."),vn=i(),$e=s("div"),h(lt.$$.fragment),$n=i(),Sr=s("p"),bn=n(`Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts examples to the correct format.`),Cs=i(),U=s("div"),h(it.$$.fragment),xn=i(),Ar=s("p"),En=n("A single training/test example for simple sequence classification."),wn=i(),be=s("div"),h(pt.$$.fragment),yn=i(),Lr=s("p"),Pn=n("Serializes this instance to a JSON string."),Os=i(),F=s("div"),h(ct.$$.fragment),qn=i(),Dr=s("p"),kn=n("A single set of features of data. Property names are the same names as the corresponding inputs to a model."),In=i(),xe=s("div"),h(ft.$$.fragment),Sn=i(),Tr=s("p"),An=n("Serializes this instance to a JSON string."),Qs=i(),te=s("h2"),Ee=s("a"),Nr=s("span"),h(dt.$$.fragment),Ln=i(),Mr=s("span"),Dn=n("GLUE"),Us=i(),we=s("p"),mt=s("a"),Tn=n("General Language Understanding Evaluation (GLUE)"),Nn=n(` is a benchmark that evaluates the performance of models across a diverse set of existing NLU tasks. 
It was released together with the paper `),ht=s("a"),Mn=n(`GLUE: A multi-task benchmark and analysis platform for natural language understanding`),Fs=i(),er=s("p"),jn=n(`This library hosts a total of 10 processors for the following tasks: MRPC, MNLI, MNLI (mismatched), CoLA, SST2, STSB, QQP, QNLI, RTE and WNLI.`),Hs=i(),tr=s("p"),Vn=n("Those processors are:"),Gs=i(),w=s("ul"),jr=s("li"),Vr=s("code"),zn=n("MrpcProcessor"),Cn=i(),zr=s("li"),Cr=s("code"),On=n("MnliProcessor"),Qn=i(),Or=s("li"),Qr=s("code"),Un=n("MnliMismatchedProcessor"),Fn=i(),Ur=s("li"),Fr=s("code"),Hn=n("Sst2Processor"),Gn=i(),Hr=s("li"),Gr=s("code"),Rn=n("StsbProcessor"),Wn=i(),Rr=s("li"),Wr=s("code"),Xn=n("QqpProcessor"),Jn=i(),Xr=s("li"),Jr=s("code"),Bn=n("QnliProcessor"),Kn=i(),Br=s("li"),Kr=s("code"),Yn=n("RteProcessor"),Zn=i(),Yr=s("li"),Zr=s("code"),el=n("WnliProcessor"),Rs=i(),ye=s("p"),tl=n(`Additionally, the following method can be used to load values from a data file and convert them to a list of `),rr=s("a"),rl=n("InputExample"),sl=n("."),Ws=i(),re=s("div"),h(ut.$$.fragment),al=i(),sr=s("p"),ol=n("Loads a data file into a list of "),es=s("code"),nl=n("InputFeatures"),Xs=i(),se=s("h2"),Pe=s("a"),ts=s("span"),h(_t.$$.fragment),ll=i(),rs=s("span"),il=n("XNLI"),Js=i(),ae=s("p"),gt=s("a"),pl=n("The Cross-Lingual NLI Corpus (XNLI)"),cl=n(` is a benchmark that evaluates the quality of cross-lingual text representations. 
XNLI is crowd-sourced dataset based on `),vt=s("a"),ss=s("em"),fl=n("MultiNLI"),dl=n(`: pairs of text are labeled with textual entailment annotations for 15 different languages (including both high-resource language such as English and low-resource languages such as Swahili).`),Bs=i(),$t=s("p"),ml=n("It was released together with the paper "),bt=s("a"),hl=n("XNLI: Evaluating Cross-lingual Sentence Representations"),Ks=i(),ar=s("p"),ul=n("This library hosts the processor to load the XNLI data:"),Ys=i(),or=s("ul"),as=s("li"),os=s("code"),_l=n("XnliProcessor"),Zs=i(),nr=s("p"),gl=n("Please note that since the gold labels are available on the test set, evaluation is performed on the test set."),ea=i(),qe=s("p"),vl=n("An example using these processors is given in the "),xt=s("a"),$l=n("run_xnli.py"),bl=n(" script."),ta=i(),oe=s("h2"),ke=s("a"),ns=s("span"),h(Et.$$.fragment),xl=i(),ls=s("span"),El=n("SQuAD"),ra=i(),H=s("p"),wt=s("a"),wl=n("The Stanford Question Answering Dataset (SQuAD)"),yl=n(` is a benchmark that evaluates the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version (v1.1) was released together with the paper `),yt=s("a"),Pl=n("SQuAD: 100,000+ Questions for Machine Comprehension of Text"),ql=n(". The second version (v2.0) was released alongside the paper "),Pt=s("a"),kl=n(`Know What You Don\u2019t Know: Unanswerable Questions for SQuAD`),Il=n("."),sa=i(),lr=s("p"),Sl=n("This library hosts a processor for each of the two versions:"),aa=i(),ne=s("h3"),Ie=s("a"),is=s("span"),h(qt.$$.fragment),Al=i(),ps=s("span"),Ll=n("Processors"),oa=i(),ir=s("p"),Dl=n("Those processors are:"),na=i(),Se=s("ul"),cs=s("li"),fs=s("code"),Tl=n("SquadV1Processor"),Nl=i(),ds=s("li"),ms=s("code"),Ml=n("SquadV2Processor"),la=i(),kt=s("p"),jl=n("They both inherit from the abstract class "),hs=s("code"),Vl=n("SquadProcessor"),ia=i(),D=s("div"),h(It.$$.fragment),zl=i(),us=s("p"),Cl=n(`Processor for the SQuAD data set. 
overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively.`),Ol=i(),Ae=s("div"),h(St.$$.fragment),Ql=i(),_s=s("p"),Ul=n("Returns the evaluation example from the data directory."),Fl=i(),C=s("div"),h(At.$$.fragment),Hl=i(),Lt=s("p"),Gl=n("Creates a list of "),gs=s("code"),Rl=n("SquadExample"),Wl=n("using a TFDS dataset."),Xl=i(),vs=s("p"),Jl=n("Examples:"),Bl=i(),h(Dt.$$.fragment),Kl=i(),Le=s("div"),h(Tt.$$.fragment),Yl=i(),$s=s("p"),Zl=n("Returns the training examples from the data directory."),pa=i(),De=s("p"),ei=n(`Additionally, the following method can be used to convert SQuAD examples into `),bs=s("code"),ti=n("SquadFeatures"),ri=n(" that can be used as model inputs."),ca=i(),j=s("div"),h(Nt.$$.fragment),si=i(),xs=s("p"),ai=n(`Converts a list of examples into a list of features that can be directly given as input to a model. It is model-dependant and takes advantage of many of the tokenizer\u2019s features to create the model\u2019s inputs.`),oi=i(),Es=s("p"),ni=n("Example:"),li=i(),h(Mt.$$.fragment),fa=i(),Te=s("p"),ii=n(`These processors as well as the aforementionned method can be used with files containing the data as well as with the `),ws=s("em"),pi=n("tensorflow_datasets"),ci=n(" package. 
Examples are given below."),da=i(),le=s("h3"),Ne=s("a"),ys=s("span"),h(jt.$$.fragment),fi=i(),Ps=s("span"),di=n("Example usage"),ma=i(),pr=s("p"),mi=n("Here is an example using the processors as well as the conversion method using data files:"),ha=i(),h(Vt.$$.fragment),ua=i(),Me=s("p"),hi=n("Using "),qs=s("em"),ui=n("tensorflow_datasets"),_i=n(" is as easy as using a data file:"),_a=i(),h(zt.$$.fragment),ga=i(),je=s("p"),gi=n("Another example using these processors is given in the "),Ct=s("a"),vi=n("run_squad.py"),$i=n(" script."),this.h()},l(e){const c=yc('[data-svelte="svelte-1phssyn"]',document.head);m=a(c,"META",{name:!0,content:!0}),c.forEach(r),k=p(e),b=a(e,"H1",{class:!0});var Ot=o(b);x=a(Ot,"A",{id:!0,class:!0,href:!0});var ks=o(x);N=a(ks,"SPAN",{});var Is=o(N);u(q.$$.fragment,Is),Is.forEach(r),ks.forEach(r),Q=p(Ot),M=a(Ot,"SPAN",{});var yi=o(M);L=l(yi,"Processors"),yi.forEach(r),Ot.forEach(r),E=p(e),V=a(e,"P",{});var Pi=o(V);K=l(Pi,"Processors can mean two different things in the Transformers library:"),Pi.forEach(r),Ls=p(e),ie=a(e,"UL",{});var $a=o(ie);Y=a($a,"LI",{});var cr=o(Y);Za=l(cr,"the objects that pre-process inputs for multi-modal models such as "),Ut=a(cr,"A",{href:!0});var qi=o(Ut);eo=l(qi,"Wav2Vec2"),qi.forEach(r),to=l(cr,` (speech and text) or `),Ft=a(cr,"A",{href:!0});var ki=o(Ft);ro=l(ki,"CLIP"),ki.forEach(r),so=l(cr," (text and vision)"),cr.forEach(r),ao=p($a),_r=a($a,"LI",{});var Ii=o(_r);oo=l(Ii,"deprecated objects that were used in older versions of the library to preprocess data for GLUE or SQUAD."),Ii.forEach(r),$a.forEach(r),Ds=p(e),Z=a(e,"H2",{class:!0});var ba=o(Z);pe=a(ba,"A",{id:!0,class:!0,href:!0});var Si=o(pe);gr=a(Si,"SPAN",{});var Ai=o(gr);u(Qe.$$.fragment,Ai),Ai.forEach(r),Si.forEach(r),no=p(ba),vr=a(ba,"SPAN",{});var Li=o(vr);lo=l(Li,"Multi-modal processors"),Li.forEach(r),ba.forEach(r),Ts=p(e),Ht=a(e,"P",{});var Di=o(Ht);io=l(Di,`Any multi-modal model will require an object to encode or decode the data that groups several 
modalities (among text, vision and audio). This is handled by objects called processors, which group tokenizers (for the text modality) and feature extractors (for vision and audio).`),Di.forEach(r),Ns=p(e),Gt=a(e,"P",{});var Ti=o(Gt);po=l(Ti,"Those processors inherit from the following base class that implements the saving and loading functionality:"),Ti.forEach(r),Ms=p(e),S=a(e,"DIV",{class:!0});var O=o(S);u(Ue.$$.fragment,O),co=p(O),$r=a(O,"P",{});var Ni=o($r);fo=l(Ni,"This is a mixin used to provide saving/loading functionality for all processor classes."),Ni.forEach(r),mo=p(O),G=a(O,"DIV",{class:!0});var fr=o(G);u(Fe.$$.fragment,fr),ho=p(fr),br=a(fr,"P",{});var Mi=o(br);uo=l(Mi,"Instantiate a processor associated with a pretrained model."),Mi.forEach(r),_o=p(fr),u(ce.$$.fragment,fr),fr.forEach(r),go=p(O),z=a(O,"DIV",{class:!0});var Ve=o(z);u(He.$$.fragment,Ve),vo=p(Ve),Ge=a(Ve,"P",{});var xa=o(Ge);$o=l(xa,`Upload the processor files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),xr=a(xa,"CODE",{});var ji=o(xr);bo=l(ji,"repo_path_or_name"),ji.forEach(r),xo=l(xa,"."),xa.forEach(r),Eo=p(Ve),Er=a(Ve,"P",{});var Vi=o(Er);wo=l(Vi,"Examples:"),Vi.forEach(r),yo=p(Ve),u(Re.$$.fragment,Ve),Ve.forEach(r),Po=p(O),R=a(O,"DIV",{class:!0});var dr=o(R);u(We.$$.fragment,dr),qo=p(dr),Xe=a(dr,"P",{});var Ea=o(Xe);ko=l(Ea,`Register this class with a given auto class. 
This should only be used for custom feature extractors as the ones in the library are already mapped with `),wr=a(Ea,"CODE",{});var zi=o(wr);Io=l(zi,"AutoProcessor"),zi.forEach(r),So=l(Ea,"."),Ea.forEach(r),Ao=p(dr),u(fe.$$.fragment,dr),dr.forEach(r),Lo=p(O),W=a(O,"DIV",{class:!0});var mr=o(W);u(Je.$$.fragment,mr),Do=p(mr),Be=a(mr,"P",{});var wa=o(Be);To=l(wa,`Saves the attributes of this processor (feature extractor, tokenizer\u2026) in the specified directory so that it can be reloaded using the `),Rt=a(wa,"A",{href:!0});var Ci=o(Rt);No=l(Ci,"from_pretrained()"),Ci.forEach(r),Mo=l(wa," method."),wa.forEach(r),jo=p(mr),u(de.$$.fragment,mr),mr.forEach(r),O.forEach(r),js=p(e),ee=a(e,"H2",{class:!0});var ya=o(ee);me=a(ya,"A",{id:!0,class:!0,href:!0});var Oi=o(me);yr=a(Oi,"SPAN",{});var Qi=o(yr);u(Ke.$$.fragment,Qi),Qi.forEach(r),Oi.forEach(r),Vo=p(ya),Pr=a(ya,"SPAN",{});var Ui=o(Pr);zo=l(Ui,"Deprecated processors"),Ui.forEach(r),ya.forEach(r),Vs=p(e),T=a(e,"P",{});var X=o(T);Co=l(X,`All processors follow the same architecture which is that of the `),Wt=a(X,"A",{href:!0});var Fi=o(Wt);Oo=l(Fi,"DataProcessor"),Fi.forEach(r),Qo=l(X,`. The processor returns a list of `),Xt=a(X,"A",{href:!0});var Hi=o(Xt);Uo=l(Hi,"InputExample"),Hi.forEach(r),Fo=l(X,`. 
These `),Jt=a(X,"A",{href:!0});var Gi=o(Jt);Ho=l(Gi,"InputExample"),Gi.forEach(r),Go=l(X,` can be converted to `),Bt=a(X,"A",{href:!0});var Ri=o(Bt);Ro=l(Ri,"InputFeatures"),Ri.forEach(r),Wo=l(X," in order to be fed to the model."),X.forEach(r),zs=p(e),P=a(e,"DIV",{class:!0});var A=o(P);u(Ye.$$.fragment,A),Xo=p(A),qr=a(A,"P",{});var Wi=o(qr);Jo=l(Wi,"Base class for data converters for sequence classification data sets."),Wi.forEach(r),Bo=p(A),he=a(A,"DIV",{class:!0});var Pa=o(he);u(Ze.$$.fragment,Pa),Ko=p(Pa),et=a(Pa,"P",{});var qa=o(et);Yo=l(qa,"Gets a collection of "),Kt=a(qa,"A",{href:!0});var Xi=o(Kt);Zo=l(Xi,"InputExample"),Xi.forEach(r),en=l(qa," for the dev set."),qa.forEach(r),Pa.forEach(r),tn=p(A),ue=a(A,"DIV",{class:!0});var ka=o(ue);u(tt.$$.fragment,ka),rn=p(ka),kr=a(ka,"P",{});var Ji=o(kr);sn=l(Ji,"Gets an example from a dict with tensorflow tensors."),Ji.forEach(r),ka.forEach(r),an=p(A),_e=a(A,"DIV",{class:!0});var Ia=o(_e);u(rt.$$.fragment,Ia),on=p(Ia),Ir=a(Ia,"P",{});var Bi=o(Ir);nn=l(Bi,"Gets the list of labels for this data set."),Bi.forEach(r),Ia.forEach(r),ln=p(A),ge=a(A,"DIV",{class:!0});var Sa=o(ge);u(st.$$.fragment,Sa),pn=p(Sa),at=a(Sa,"P",{});var Aa=o(at);cn=l(Aa,"Gets a collection of "),Yt=a(Aa,"A",{href:!0});var Ki=o(Yt);fn=l(Ki,"InputExample"),Ki.forEach(r),dn=l(Aa," for the test set."),Aa.forEach(r),Sa.forEach(r),mn=p(A),ve=a(A,"DIV",{class:!0});var La=o(ve);u(ot.$$.fragment,La),hn=p(La),nt=a(La,"P",{});var Da=o(nt);un=l(Da,"Gets a collection of "),Zt=a(Da,"A",{href:!0});var Yi=o(Zt);_n=l(Yi,"InputExample"),Yi.forEach(r),gn=l(Da," for the train set."),Da.forEach(r),La.forEach(r),vn=p(A),$e=a(A,"DIV",{class:!0});var Ta=o($e);u(lt.$$.fragment,Ta),$n=p(Ta),Sr=a(Ta,"P",{});var Zi=o(Sr);bn=l(Zi,`Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. 
This method converts examples to the correct format.`),Zi.forEach(r),Ta.forEach(r),A.forEach(r),Cs=p(e),U=a(e,"DIV",{class:!0});var hr=o(U);u(it.$$.fragment,hr),xn=p(hr),Ar=a(hr,"P",{});var ep=o(Ar);En=l(ep,"A single training/test example for simple sequence classification."),ep.forEach(r),wn=p(hr),be=a(hr,"DIV",{class:!0});var Na=o(be);u(pt.$$.fragment,Na),yn=p(Na),Lr=a(Na,"P",{});var tp=o(Lr);Pn=l(tp,"Serializes this instance to a JSON string."),tp.forEach(r),Na.forEach(r),hr.forEach(r),Os=p(e),F=a(e,"DIV",{class:!0});var ur=o(F);u(ct.$$.fragment,ur),qn=p(ur),Dr=a(ur,"P",{});var rp=o(Dr);kn=l(rp,"A single set of features of data. Property names are the same names as the corresponding inputs to a model."),rp.forEach(r),In=p(ur),xe=a(ur,"DIV",{class:!0});var Ma=o(xe);u(ft.$$.fragment,Ma),Sn=p(Ma),Tr=a(Ma,"P",{});var sp=o(Tr);An=l(sp,"Serializes this instance to a JSON string."),sp.forEach(r),Ma.forEach(r),ur.forEach(r),Qs=p(e),te=a(e,"H2",{class:!0});var ja=o(te);Ee=a(ja,"A",{id:!0,class:!0,href:!0});var ap=o(Ee);Nr=a(ap,"SPAN",{});var op=o(Nr);u(dt.$$.fragment,op),op.forEach(r),ap.forEach(r),Ln=p(ja),Mr=a(ja,"SPAN",{});var np=o(Mr);Dn=l(np,"GLUE"),np.forEach(r),ja.forEach(r),Us=p(e),we=a(e,"P",{});var Va=o(we);mt=a(Va,"A",{href:!0,rel:!0});var lp=o(mt);Tn=l(lp,"General Language Understanding Evaluation (GLUE)"),lp.forEach(r),Nn=l(Va,` is a benchmark that evaluates the performance of models across a diverse set of existing NLU tasks. 
It was released together with the paper `),ht=a(Va,"A",{href:!0,rel:!0});var ip=o(ht);Mn=l(ip,`GLUE: A multi-task benchmark and analysis platform for natural language understanding`),ip.forEach(r),Va.forEach(r),Fs=p(e),er=a(e,"P",{});var pp=o(er);jn=l(pp,`This library hosts a total of 10 processors for the following tasks: MRPC, MNLI, MNLI (mismatched), CoLA, SST2, STSB, QQP, QNLI, RTE and WNLI.`),pp.forEach(r),Hs=p(e),tr=a(e,"P",{});var cp=o(tr);Vn=l(cp,"Those processors are:"),cp.forEach(r),Gs=p(e),w=a(e,"UL",{});var I=o(w);jr=a(I,"LI",{});var fp=o(jr);Vr=a(fp,"CODE",{});var dp=o(Vr);zn=l(dp,"MrpcProcessor"),dp.forEach(r),fp.forEach(r),Cn=p(I),zr=a(I,"LI",{});var mp=o(zr);Cr=a(mp,"CODE",{});var hp=o(Cr);On=l(hp,"MnliProcessor"),hp.forEach(r),mp.forEach(r),Qn=p(I),Or=a(I,"LI",{});var up=o(Or);Qr=a(up,"CODE",{});var _p=o(Qr);Un=l(_p,"MnliMismatchedProcessor"),_p.forEach(r),up.forEach(r),Fn=p(I),Ur=a(I,"LI",{});var gp=o(Ur);Fr=a(gp,"CODE",{});var vp=o(Fr);Hn=l(vp,"Sst2Processor"),vp.forEach(r),gp.forEach(r),Gn=p(I),Hr=a(I,"LI",{});var $p=o(Hr);Gr=a($p,"CODE",{});var bp=o(Gr);Rn=l(bp,"StsbProcessor"),bp.forEach(r),$p.forEach(r),Wn=p(I),Rr=a(I,"LI",{});var xp=o(Rr);Wr=a(xp,"CODE",{});var Ep=o(Wr);Xn=l(Ep,"QqpProcessor"),Ep.forEach(r),xp.forEach(r),Jn=p(I),Xr=a(I,"LI",{});var wp=o(Xr);Jr=a(wp,"CODE",{});var yp=o(Jr);Bn=l(yp,"QnliProcessor"),yp.forEach(r),wp.forEach(r),Kn=p(I),Br=a(I,"LI",{});var Pp=o(Br);Kr=a(Pp,"CODE",{});var qp=o(Kr);Yn=l(qp,"RteProcessor"),qp.forEach(r),Pp.forEach(r),Zn=p(I),Yr=a(I,"LI",{});var kp=o(Yr);Zr=a(kp,"CODE",{});var Ip=o(Zr);el=l(Ip,"WnliProcessor"),Ip.forEach(r),kp.forEach(r),I.forEach(r),Rs=p(e),ye=a(e,"P",{});var za=o(ye);tl=l(za,`Additionally, the following method can be used to load values from a data file and convert them to a list of `),rr=a(za,"A",{href:!0});var Sp=o(rr);rl=l(Sp,"InputExample"),Sp.forEach(r),sl=l(za,"."),za.forEach(r),Ws=p(e),re=a(e,"DIV",{class:!0});var Ca=o(re);u(ut.$$.fragment,Ca),al=p(Ca),sr=a(Ca,"P",{});var 
bi=o(sr);ol=l(bi,"Loads a data file into a list of "),es=a(bi,"CODE",{});var Ap=o(es);nl=l(Ap,"InputFeatures"),Ap.forEach(r),bi.forEach(r),Ca.forEach(r),Xs=p(e),se=a(e,"H2",{class:!0});var Oa=o(se);Pe=a(Oa,"A",{id:!0,class:!0,href:!0});var Lp=o(Pe);ts=a(Lp,"SPAN",{});var Dp=o(ts);u(_t.$$.fragment,Dp),Dp.forEach(r),Lp.forEach(r),ll=p(Oa),rs=a(Oa,"SPAN",{});var Tp=o(rs);il=l(Tp,"XNLI"),Tp.forEach(r),Oa.forEach(r),Js=p(e),ae=a(e,"P",{});var Ss=o(ae);gt=a(Ss,"A",{href:!0,rel:!0});var Np=o(gt);pl=l(Np,"The Cross-Lingual NLI Corpus (XNLI)"),Np.forEach(r),cl=l(Ss,` is a benchmark that evaluates the quality of cross-lingual text representations. XNLI is crowd-sourced dataset based on `),vt=a(Ss,"A",{href:!0,rel:!0});var Mp=o(vt);ss=a(Mp,"EM",{});var jp=o(ss);fl=l(jp,"MultiNLI"),jp.forEach(r),Mp.forEach(r),dl=l(Ss,`: pairs of text are labeled with textual entailment annotations for 15 different languages (including both high-resource language such as English and low-resource languages such as Swahili).`),Ss.forEach(r),Bs=p(e),$t=a(e,"P",{});var xi=o($t);ml=l(xi,"It was released together with the paper "),bt=a(xi,"A",{href:!0,rel:!0});var Vp=o(bt);hl=l(Vp,"XNLI: Evaluating Cross-lingual Sentence Representations"),Vp.forEach(r),xi.forEach(r),Ks=p(e),ar=a(e,"P",{});var zp=o(ar);ul=l(zp,"This library hosts the processor to load the XNLI data:"),zp.forEach(r),Ys=p(e),or=a(e,"UL",{});var Cp=o(or);as=a(Cp,"LI",{});var Op=o(as);os=a(Op,"CODE",{});var Qp=o(os);_l=l(Qp,"XnliProcessor"),Qp.forEach(r),Op.forEach(r),Cp.forEach(r),Zs=p(e),nr=a(e,"P",{});var Up=o(nr);gl=l(Up,"Please note that since the gold labels are available on the test set, evaluation is performed on the test set."),Up.forEach(r),ea=p(e),qe=a(e,"P",{});var Qa=o(qe);vl=l(Qa,"An example using these processors is given in the "),xt=a(Qa,"A",{href:!0,rel:!0});var Fp=o(xt);$l=l(Fp,"run_xnli.py"),Fp.forEach(r),bl=l(Qa," script."),Qa.forEach(r),ta=p(e),oe=a(e,"H2",{class:!0});var 
Ua=o(oe);ke=a(Ua,"A",{id:!0,class:!0,href:!0});var Hp=o(ke);ns=a(Hp,"SPAN",{});var Gp=o(ns);u(Et.$$.fragment,Gp),Gp.forEach(r),Hp.forEach(r),xl=p(Ua),ls=a(Ua,"SPAN",{});var Rp=o(ls);El=l(Rp,"SQuAD"),Rp.forEach(r),Ua.forEach(r),ra=p(e),H=a(e,"P",{});var Qt=o(H);wt=a(Qt,"A",{href:!0,rel:!0});var Wp=o(wt);wl=l(Wp,"The Stanford Question Answering Dataset (SQuAD)"),Wp.forEach(r),yl=l(Qt,` is a benchmark that evaluates the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version (v1.1) was released together with the paper `),yt=a(Qt,"A",{href:!0,rel:!0});var Xp=o(yt);Pl=l(Xp,"SQuAD: 100,000+ Questions for Machine Comprehension of Text"),Xp.forEach(r),ql=l(Qt,". The second version (v2.0) was released alongside the paper "),Pt=a(Qt,"A",{href:!0,rel:!0});var Jp=o(Pt);kl=l(Jp,`Know What You Don\u2019t Know: Unanswerable Questions for SQuAD`),Jp.forEach(r),Il=l(Qt,"."),Qt.forEach(r),sa=p(e),lr=a(e,"P",{});var Bp=o(lr);Sl=l(Bp,"This library hosts a processor for each of the two versions:"),Bp.forEach(r),aa=p(e),ne=a(e,"H3",{class:!0});var Fa=o(ne);Ie=a(Fa,"A",{id:!0,class:!0,href:!0});var Kp=o(Ie);is=a(Kp,"SPAN",{});var Yp=o(is);u(qt.$$.fragment,Yp),Yp.forEach(r),Kp.forEach(r),Al=p(Fa),ps=a(Fa,"SPAN",{});var Zp=o(ps);Ll=l(Zp,"Processors"),Zp.forEach(r),Fa.forEach(r),oa=p(e),ir=a(e,"P",{});var ec=o(ir);Dl=l(ec,"Those processors are:"),ec.forEach(r),na=p(e),Se=a(e,"UL",{});var Ha=o(Se);cs=a(Ha,"LI",{});var tc=o(cs);fs=a(tc,"CODE",{});var rc=o(fs);Tl=l(rc,"SquadV1Processor"),rc.forEach(r),tc.forEach(r),Nl=p(Ha),ds=a(Ha,"LI",{});var sc=o(ds);ms=a(sc,"CODE",{});var ac=o(ms);Ml=l(ac,"SquadV2Processor"),ac.forEach(r),sc.forEach(r),Ha.forEach(r),la=p(e),kt=a(e,"P",{});var Ei=o(kt);jl=l(Ei,"They both inherit from the abstract class "),hs=a(Ei,"CODE",{});var oc=o(hs);Vl=l(oc,"SquadProcessor"),oc.forEach(r),Ei.forEach(r),ia=p(e),D=a(e,"DIV",{class:!0});var J=o(D);u(It.$$.fragment,J),zl=p(J),us=a(J,"P",{});var nc=o(us);Cl=l(nc,`Processor 
for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively.`),nc.forEach(r),Ol=p(J),Ae=a(J,"DIV",{class:!0});var Ga=o(Ae);u(St.$$.fragment,Ga),Ql=p(Ga),_s=a(Ga,"P",{});var lc=o(_s);Ul=l(lc,"Returns the evaluation example from the data directory."),lc.forEach(r),Ga.forEach(r),Fl=p(J),C=a(J,"DIV",{class:!0});var ze=o(C);u(At.$$.fragment,ze),Hl=p(ze),Lt=a(ze,"P",{});var Ra=o(Lt);Gl=l(Ra,"Creates a list of "),gs=a(Ra,"CODE",{});var ic=o(gs);Rl=l(ic,"SquadExample"),ic.forEach(r),Wl=l(Ra,"using a TFDS dataset."),Ra.forEach(r),Xl=p(ze),vs=a(ze,"P",{});var pc=o(vs);Jl=l(pc,"Examples:"),pc.forEach(r),Bl=p(ze),u(Dt.$$.fragment,ze),ze.forEach(r),Kl=p(J),Le=a(J,"DIV",{class:!0});var Wa=o(Le);u(Tt.$$.fragment,Wa),Yl=p(Wa),$s=a(Wa,"P",{});var cc=o($s);Zl=l(cc,"Returns the training examples from the data directory."),cc.forEach(r),Wa.forEach(r),J.forEach(r),pa=p(e),De=a(e,"P",{});var Xa=o(De);ei=l(Xa,`Additionally, the following method can be used to convert SQuAD examples into `),bs=a(Xa,"CODE",{});var fc=o(bs);ti=l(fc,"SquadFeatures"),fc.forEach(r),ri=l(Xa," that can be used as model inputs."),Xa.forEach(r),ca=p(e),j=a(e,"DIV",{class:!0});var Ce=o(j);u(Nt.$$.fragment,Ce),si=p(Ce),xs=a(Ce,"P",{});var dc=o(xs);ai=l(dc,`Converts a list of examples into a list of features that can be directly given as input to a model. It is model-dependant and takes advantage of many of the tokenizer\u2019s features to create the model\u2019s inputs.`),dc.forEach(r),oi=p(Ce),Es=a(Ce,"P",{});var mc=o(Es);ni=l(mc,"Example:"),mc.forEach(r),li=p(Ce),u(Mt.$$.fragment,Ce),Ce.forEach(r),fa=p(e),Te=a(e,"P",{});var Ja=o(Te);ii=l(Ja,`These processors as well as the aforementionned method can be used with files containing the data as well as with the `),ws=a(Ja,"EM",{});var hc=o(ws);pi=l(hc,"tensorflow_datasets"),hc.forEach(r),ci=l(Ja," package. 
Examples are given below."),Ja.forEach(r),da=p(e),le=a(e,"H3",{class:!0});var Ba=o(le);Ne=a(Ba,"A",{id:!0,class:!0,href:!0});var uc=o(Ne);ys=a(uc,"SPAN",{});var _c=o(ys);u(jt.$$.fragment,_c),_c.forEach(r),uc.forEach(r),fi=p(Ba),Ps=a(Ba,"SPAN",{});var gc=o(Ps);di=l(gc,"Example usage"),gc.forEach(r),Ba.forEach(r),ma=p(e),pr=a(e,"P",{});var vc=o(pr);mi=l(vc,"Here is an example using the processors as well as the conversion method using data files:"),vc.forEach(r),ha=p(e),u(Vt.$$.fragment,e),ua=p(e),Me=a(e,"P",{});var Ka=o(Me);hi=l(Ka,"Using "),qs=a(Ka,"EM",{});var $c=o(qs);ui=l($c,"tensorflow_datasets"),$c.forEach(r),_i=l(Ka," is as easy as using a data file:"),Ka.forEach(r),_a=p(e),u(zt.$$.fragment,e),ga=p(e),je=a(e,"P",{});var Ya=o(je);gi=l(Ya,"Another example using these processors is given in the "),Ct=a(Ya,"A",{href:!0,rel:!0});var bc=o(Ct);vi=l(bc,"run_squad.py"),bc.forEach(r),$i=l(Ya," script."),Ya.forEach(r),this.h()},h(){f(m,"name","hf:doc:metadata"),f(m,"content",JSON.stringify(Sc)),f(x,"id","processors"),f(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(x,"href","#processors"),f(b,"class","relative group"),f(Ut,"href","../model_doc/wav2vec2"),f(Ft,"href","../model_doc/clip"),f(pe,"id","transformers.ProcessorMixin"),f(pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(pe,"href","#transformers.ProcessorMixin"),f(Z,"class","relative group"),f(G,"class","docstring"),f(z,"class","docstring"),f(R,"class","docstring"),f(Rt,"href","/docs/transformers/pr_16143/en/main_classes/processors#transformers.ProcessorMixin.from_pretrained"),f(W,"class","docstring"),f(S,"class","docstring"),f(me,"id","transformers.DataProcessor"),f(me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(me,"href","#transformers.DataProcessor"),f(ee,"class","relative group"),f(Wt,"href","/docs/transformers/pr_16143/en/main_classes/processors#transformers.DataProcessor"),f(Xt,"href","/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample"),f(Jt,"href","/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample"),f(Bt,"href","/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputFeatures"),f(Kt,"href","/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample"),f(he,"class","docstring"),f(ue,"class","docstring"),f(_e,"class","docstring"),f(Yt,"href","/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample"),f(ge,"class","docstring"),f(Zt,"href","/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample"),f(ve,"class","docstring"),f($e,"class","docstring"),f(P,"class","docstring"),f(be,"class","docstring"),f(U,"class","docstring"),f(xe,"class","docstring"),f(F,"class","docstring"),f(Ee,"id","transformers.glue_convert_examples_to_features"),f(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ee,"href","#transformers.glue_convert_examples_to_features"),f(te,"class","relative group"),f(mt,"href","https://gluebenchmark.com/"),f(mt,"rel","nofollow"),f(ht,"href","https://openreview.net/pdf?id=rJ4km2R5t7"),f(ht,"rel","nofollow"),f(rr,"href","/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample"),f(re,"class","docstring"),f(Pe,"id","xnli"),f(Pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Pe,"href","#xnli"),f(se,"class","relative 
group"),f(gt,"href","https://www.nyu.edu/projects/bowman/xnli/"),f(gt,"rel","nofollow"),f(vt,"href","http://www.nyu.edu/projects/bowman/multinli/"),f(vt,"rel","nofollow"),f(bt,"href","https://arxiv.org/abs/1809.05053"),f(bt,"rel","nofollow"),f(xt,"href","https://github.com/huggingface/transformers/tree/master/examples/legacy/text-classification/run_xnli.py"),f(xt,"rel","nofollow"),f(ke,"id","squad"),f(ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ke,"href","#squad"),f(oe,"class","relative group"),f(wt,"href","https://rajpurkar.github.io/SQuAD-explorer//"),f(wt,"rel","nofollow"),f(yt,"href","https://arxiv.org/abs/1606.05250"),f(yt,"rel","nofollow"),f(Pt,"href","https://arxiv.org/abs/1806.03822"),f(Pt,"rel","nofollow"),f(Ie,"id","transformers.data.processors.squad.SquadProcessor"),f(Ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ie,"href","#transformers.data.processors.squad.SquadProcessor"),f(ne,"class","relative group"),f(Ae,"class","docstring"),f(C,"class","docstring"),f(Le,"class","docstring"),f(D,"class","docstring"),f(j,"class","docstring"),f(Ne,"id","example-usage"),f(Ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ne,"href","#example-usage"),f(le,"class","relative 
group"),f(Ct,"href","https://github.com/huggingface/transformers/tree/master/examples/legacy/question-answering/run_squad.py"),f(Ct,"rel","nofollow")},m(e,c){t(document.head,m),d(e,k,c),d(e,b,c),t(b,x),t(x,N),_(q,N,null),t(b,Q),t(b,M),t(M,L),d(e,E,c),d(e,V,c),t(V,K),d(e,Ls,c),d(e,ie,c),t(ie,Y),t(Y,Za),t(Y,Ut),t(Ut,eo),t(Y,to),t(Y,Ft),t(Ft,ro),t(Y,so),t(ie,ao),t(ie,_r),t(_r,oo),d(e,Ds,c),d(e,Z,c),t(Z,pe),t(pe,gr),_(Qe,gr,null),t(Z,no),t(Z,vr),t(vr,lo),d(e,Ts,c),d(e,Ht,c),t(Ht,io),d(e,Ns,c),d(e,Gt,c),t(Gt,po),d(e,Ms,c),d(e,S,c),_(Ue,S,null),t(S,co),t(S,$r),t($r,fo),t(S,mo),t(S,G),_(Fe,G,null),t(G,ho),t(G,br),t(br,uo),t(G,_o),_(ce,G,null),t(S,go),t(S,z),_(He,z,null),t(z,vo),t(z,Ge),t(Ge,$o),t(Ge,xr),t(xr,bo),t(Ge,xo),t(z,Eo),t(z,Er),t(Er,wo),t(z,yo),_(Re,z,null),t(S,Po),t(S,R),_(We,R,null),t(R,qo),t(R,Xe),t(Xe,ko),t(Xe,wr),t(wr,Io),t(Xe,So),t(R,Ao),_(fe,R,null),t(S,Lo),t(S,W),_(Je,W,null),t(W,Do),t(W,Be),t(Be,To),t(Be,Rt),t(Rt,No),t(Be,Mo),t(W,jo),_(de,W,null),d(e,js,c),d(e,ee,c),t(ee,me),t(me,yr),_(Ke,yr,null),t(ee,Vo),t(ee,Pr),t(Pr,zo),d(e,Vs,c),d(e,T,c),t(T,Co),t(T,Wt),t(Wt,Oo),t(T,Qo),t(T,Xt),t(Xt,Uo),t(T,Fo),t(T,Jt),t(Jt,Ho),t(T,Go),t(T,Bt),t(Bt,Ro),t(T,Wo),d(e,zs,c),d(e,P,c),_(Ye,P,null),t(P,Xo),t(P,qr),t(qr,Jo),t(P,Bo),t(P,he),_(Ze,he,null),t(he,Ko),t(he,et),t(et,Yo),t(et,Kt),t(Kt,Zo),t(et,en),t(P,tn),t(P,ue),_(tt,ue,null),t(ue,rn),t(ue,kr),t(kr,sn),t(P,an),t(P,_e),_(rt,_e,null),t(_e,on),t(_e,Ir),t(Ir,nn),t(P,ln),t(P,ge),_(st,ge,null),t(ge,pn),t(ge,at),t(at,cn),t(at,Yt),t(Yt,fn),t(at,dn),t(P,mn),t(P,ve),_(ot,ve,null),t(ve,hn),t(ve,nt),t(nt,un),t(nt,Zt),t(Zt,_n),t(nt,gn),t(P,vn),t(P,$e),_(lt,$e,null),t($e,$n),t($e,Sr),t(Sr,bn),d(e,Cs,c),d(e,U,c),_(it,U,null),t(U,xn),t(U,Ar),t(Ar,En),t(U,wn),t(U,be),_(pt,be,null),t(be,yn),t(be,Lr),t(Lr,Pn),d(e,Os,c),d(e,F,c),_(ct,F,null),t(F,qn),t(F,Dr),t(Dr,kn),t(F,In),t(F,xe),_(ft,xe,null),t(xe,Sn),t(xe,Tr),t(Tr,An),d(e,Qs,c),d(e,te,c),t(te,Ee),t(Ee,Nr),_(dt,Nr,null),t(te,Ln),t(te,Mr),t(Mr,Dn),d(e,Us,c),d(e,we,c),t(we,mt),t(mt,T
n),t(we,Nn),t(we,ht),t(ht,Mn),d(e,Fs,c),d(e,er,c),t(er,jn),d(e,Hs,c),d(e,tr,c),t(tr,Vn),d(e,Gs,c),d(e,w,c),t(w,jr),t(jr,Vr),t(Vr,zn),t(w,Cn),t(w,zr),t(zr,Cr),t(Cr,On),t(w,Qn),t(w,Or),t(Or,Qr),t(Qr,Un),t(w,Fn),t(w,Ur),t(Ur,Fr),t(Fr,Hn),t(w,Gn),t(w,Hr),t(Hr,Gr),t(Gr,Rn),t(w,Wn),t(w,Rr),t(Rr,Wr),t(Wr,Xn),t(w,Jn),t(w,Xr),t(Xr,Jr),t(Jr,Bn),t(w,Kn),t(w,Br),t(Br,Kr),t(Kr,Yn),t(w,Zn),t(w,Yr),t(Yr,Zr),t(Zr,el),d(e,Rs,c),d(e,ye,c),t(ye,tl),t(ye,rr),t(rr,rl),t(ye,sl),d(e,Ws,c),d(e,re,c),_(ut,re,null),t(re,al),t(re,sr),t(sr,ol),t(sr,es),t(es,nl),d(e,Xs,c),d(e,se,c),t(se,Pe),t(Pe,ts),_(_t,ts,null),t(se,ll),t(se,rs),t(rs,il),d(e,Js,c),d(e,ae,c),t(ae,gt),t(gt,pl),t(ae,cl),t(ae,vt),t(vt,ss),t(ss,fl),t(ae,dl),d(e,Bs,c),d(e,$t,c),t($t,ml),t($t,bt),t(bt,hl),d(e,Ks,c),d(e,ar,c),t(ar,ul),d(e,Ys,c),d(e,or,c),t(or,as),t(as,os),t(os,_l),d(e,Zs,c),d(e,nr,c),t(nr,gl),d(e,ea,c),d(e,qe,c),t(qe,vl),t(qe,xt),t(xt,$l),t(qe,bl),d(e,ta,c),d(e,oe,c),t(oe,ke),t(ke,ns),_(Et,ns,null),t(oe,xl),t(oe,ls),t(ls,El),d(e,ra,c),d(e,H,c),t(H,wt),t(wt,wl),t(H,yl),t(H,yt),t(yt,Pl),t(H,ql),t(H,Pt),t(Pt,kl),t(H,Il),d(e,sa,c),d(e,lr,c),t(lr,Sl),d(e,aa,c),d(e,ne,c),t(ne,Ie),t(Ie,is),_(qt,is,null),t(ne,Al),t(ne,ps),t(ps,Ll),d(e,oa,c),d(e,ir,c),t(ir,Dl),d(e,na,c),d(e,Se,c),t(Se,cs),t(cs,fs),t(fs,Tl),t(Se,Nl),t(Se,ds),t(ds,ms),t(ms,Ml),d(e,la,c),d(e,kt,c),t(kt,jl),t(kt,hs),t(hs,Vl),d(e,ia,c),d(e,D,c),_(It,D,null),t(D,zl),t(D,us),t(us,Cl),t(D,Ol),t(D,Ae),_(St,Ae,null),t(Ae,Ql),t(Ae,_s),t(_s,Ul),t(D,Fl),t(D,C),_(At,C,null),t(C,Hl),t(C,Lt),t(Lt,Gl),t(Lt,gs),t(gs,Rl),t(Lt,Wl),t(C,Xl),t(C,vs),t(vs,Jl),t(C,Bl),_(Dt,C,null),t(D,Kl),t(D,Le),_(Tt,Le,null),t(Le,Yl),t(Le,$s),t($s,Zl),d(e,pa,c),d(e,De,c),t(De,ei),t(De,bs),t(bs,ti),t(De,ri),d(e,ca,c),d(e,j,c),_(Nt,j,null),t(j,si),t(j,xs),t(xs,ai),t(j,oi),t(j,Es),t(Es,ni),t(j,li),_(Mt,j,null),d(e,fa,c),d(e,Te,c),t(Te,ii),t(Te,ws),t(ws,pi),t(Te,ci),d(e,da,c),d(e,le,c),t(le,Ne),t(Ne,ys),_(jt,ys,null),t(le,fi),t(le,Ps),t(Ps,di),d(e,ma,c),d(e,pr,c),t(pr,mi),d(e,ha,c),_(Vt,e,c),d(e,ua,c),
d(e,Me,c),t(Me,hi),t(Me,qs),t(qs,ui),t(Me,_i),d(e,_a,c),_(zt,e,c),d(e,ga,c),d(e,je,c),t(je,gi),t(je,Ct),t(Ct,vi),t(je,$i),va=!0},p(e,[c]){const Ot={};c&2&&(Ot.$$scope={dirty:c,ctx:e}),ce.$set(Ot);const ks={};c&2&&(ks.$$scope={dirty:c,ctx:e}),fe.$set(ks);const Is={};c&2&&(Is.$$scope={dirty:c,ctx:e}),de.$set(Is)},i(e){va||(g(q.$$.fragment,e),g(Qe.$$.fragment,e),g(Ue.$$.fragment,e),g(Fe.$$.fragment,e),g(ce.$$.fragment,e),g(He.$$.fragment,e),g(Re.$$.fragment,e),g(We.$$.fragment,e),g(fe.$$.fragment,e),g(Je.$$.fragment,e),g(de.$$.fragment,e),g(Ke.$$.fragment,e),g(Ye.$$.fragment,e),g(Ze.$$.fragment,e),g(tt.$$.fragment,e),g(rt.$$.fragment,e),g(st.$$.fragment,e),g(ot.$$.fragment,e),g(lt.$$.fragment,e),g(it.$$.fragment,e),g(pt.$$.fragment,e),g(ct.$$.fragment,e),g(ft.$$.fragment,e),g(dt.$$.fragment,e),g(ut.$$.fragment,e),g(_t.$$.fragment,e),g(Et.$$.fragment,e),g(qt.$$.fragment,e),g(It.$$.fragment,e),g(St.$$.fragment,e),g(At.$$.fragment,e),g(Dt.$$.fragment,e),g(Tt.$$.fragment,e),g(Nt.$$.fragment,e),g(Mt.$$.fragment,e),g(jt.$$.fragment,e),g(Vt.$$.fragment,e),g(zt.$$.fragment,e),va=!0)},o(e){v(q.$$.fragment,e),v(Qe.$$.fragment,e),v(Ue.$$.fragment,e),v(Fe.$$.fragment,e),v(ce.$$.fragment,e),v(He.$$.fragment,e),v(Re.$$.fragment,e),v(We.$$.fragment,e),v(fe.$$.fragment,e),v(Je.$$.fragment,e),v(de.$$.fragment,e),v(Ke.$$.fragment,e),v(Ye.$$.fragment,e),v(Ze.$$.fragment,e),v(tt.$$.fragment,e),v(rt.$$.fragment,e),v(st.$$.fragment,e),v(ot.$$.fragment,e),v(lt.$$.fragment,e),v(it.$$.fragment,e),v(pt.$$.fragment,e),v(ct.$$.fragment,e),v(ft.$$.fragment,e),v(dt.$$.fragment,e),v(ut.$$.fragment,e),v(_t.$$.fragment,e),v(Et.$$.fragment,e),v(qt.$$.fragment,e),v(It.$$.fragment,e),v(St.$$.fragment,e),v(At.$$.fragment,e),v(Dt.$$.fragment,e),v(Tt.$$.fragment,e),v(Nt.$$.fragment,e),v(Mt.$$.fragment,e),v(jt.$$.fragment,e),v(Vt.$$.fragment,e),v(zt.$$.fragment,e),va=!1},d(e){r(m),e&&r(k),e&&r(b),$(q),e&&r(E),e&&r(V),e&&r(Ls),e&&r(ie),e&&r(Ds),e&&r(Z),$(Qe),e&&r(Ts),e&&r(Ht),e&&r(Ns),e&&r(Gt),e&&r(Ms),e&&r(S
),$(Ue),$(Fe),$(ce),$(He),$(Re),$(We),$(fe),$(Je),$(de),e&&r(js),e&&r(ee),$(Ke),e&&r(Vs),e&&r(T),e&&r(zs),e&&r(P),$(Ye),$(Ze),$(tt),$(rt),$(st),$(ot),$(lt),e&&r(Cs),e&&r(U),$(it),$(pt),e&&r(Os),e&&r(F),$(ct),$(ft),e&&r(Qs),e&&r(te),$(dt),e&&r(Us),e&&r(we),e&&r(Fs),e&&r(er),e&&r(Hs),e&&r(tr),e&&r(Gs),e&&r(w),e&&r(Rs),e&&r(ye),e&&r(Ws),e&&r(re),$(ut),e&&r(Xs),e&&r(se),$(_t),e&&r(Js),e&&r(ae),e&&r(Bs),e&&r($t),e&&r(Ks),e&&r(ar),e&&r(Ys),e&&r(or),e&&r(Zs),e&&r(nr),e&&r(ea),e&&r(qe),e&&r(ta),e&&r(oe),$(Et),e&&r(ra),e&&r(H),e&&r(sa),e&&r(lr),e&&r(aa),e&&r(ne),$(qt),e&&r(oa),e&&r(ir),e&&r(na),e&&r(Se),e&&r(la),e&&r(kt),e&&r(ia),e&&r(D),$(It),$(St),$(At),$(Dt),$(Tt),e&&r(pa),e&&r(De),e&&r(ca),e&&r(j),$(Nt),$(Mt),e&&r(fa),e&&r(Te),e&&r(da),e&&r(le),$(jt),e&&r(ma),e&&r(pr),e&&r(ha),$(Vt,e),e&&r(ua),e&&r(Me),e&&r(_a),$(zt,e),e&&r(ga),e&&r(je)}}}const Sc={local:"processors",sections:[{local:"transformers.ProcessorMixin",title:"Multi-modal processors"},{local:"transformers.DataProcessor",title:"Deprecated processors"},{local:"transformers.glue_convert_examples_to_features",title:"GLUE"},{local:"xnli",title:"XNLI"},{local:"squad",sections:[{local:"transformers.data.processors.squad.SquadProcessor",title:"Processors"},{local:"example-usage",title:"Example usage"}],title:"SQuAD"}],title:"Processors"};function Ac(B,m,k){let{fw:b}=m;return B.$$set=x=>{"fw"in x&&k(0,b=x.fw)},[b]}class Vc extends xc{constructor(m){super();Ec(this,m,Ac,Ic,wc,{fw:0})}}export{Vc as default,Sc as metadata};
405
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/main_classes/keras_callbacks.mdx-7ee92fa9.js
import{S as pt,i as ht,s as ut,e as o,k as c,w as x,t as n,M as mt,c as s,d as a,m as d,a as r,x as P,h as l,b as p,F as t,g as b,y as E,L as ft,q as z,o as H,B as q}from"../../chunks/vendor-4833417e.js";import{D as dt}from"../../chunks/Docstring-4f315ed9.js";import{C as We}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as Fe}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function bt(ue){let k,I,m,_,W,K,me,F,fe,se,A,be,re,y,C,G,O,_e,R,ge,ne,i,j,ke,u,ve,V,ye,we,J,$e,Ce,Q,Te,xe,X,Pe,Ee,ze,Y,He,qe,D,Ke,Z,Oe,je,N,le,w,T,ee,M,De,te,Ne,ie,g,L,Me,$,Le,ae,Se,Ie,oe,Ae,Ue,Be,S,ce;return K=new Fe({}),O=new Fe({}),j=new dt({props:{name:"class transformers.KerasMetricCallback",anchor:"transformers.KerasMetricCallback",parameters:[{name:"metric_fn",val:": typing.Callable"},{name:"eval_dataset",val:": typing.Union[tensorflow.python.data.ops.dataset_ops.DatasetV2, numpy.ndarray, tensorflow.python.framework.ops.Tensor, tuple, dict]"},{name:"output_cols",val:": typing.Optional[typing.List[str]] = None"},{name:"label_cols",val:": typing.Optional[typing.List[str]] = None"},{name:"batch_size",val:": typing.Optional[int] = None"},{name:"predict_with_generate",val:": typing.Optional[bool] = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/keras_callbacks.py#L22",parametersDescription:[{anchor:"transformers.KerasMetricCallback.metric_fn",description:`<strong>metric_fn</strong> (<code>Callable</code>) &#x2014; Metric function provided by the user. It will be called with two arguments - <code>predictions</code> and <code>labels</code>. These contain the model&#x2019;s outputs and matching labels from the dataset. 
It should return a dict mapping metric names to numerical values.`,name:"metric_fn"},{anchor:"transformers.KerasMetricCallback.eval_dataset",description:`<strong>eval_dataset</strong> (<code>tf.data.Dataset</code> or <code>dict</code> or <code>tuple</code> or <code>np.ndarray</code> or <code>tf.Tensor</code>) &#x2014; Validation data to be used to generate predictions for the <code>metric_fn</code>.`,name:"eval_dataset"},{anchor:"transformers.KerasMetricCallback.output_cols",description:"<strong>output_cols</strong> (`List[str], <em>optional</em>) &#x2014;\nA list of columns to be retained from the model output as the predictions. Defaults to all.",name:"output_cols"},{anchor:"transformers.KerasMetricCallback.label_cols",description:`<strong>label_cols</strong> (&#x2019;<code>List[str]</code>, <em>optional</em>&#x2019;) &#x2014; A list of columns to be retained from the input dataset as the labels. Will be autodetected if this is not supplied.`,name:"label_cols"},{anchor:"transformers.KerasMetricCallback.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>) &#x2014; Batch size. 
Only used when the data is not a pre-batched <code>tf.data.Dataset</code>.`,name:"batch_size"},{anchor:"transformers.KerasMetricCallback.predict_with_generate",description:`<strong>predict_with_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether we should use <code>model.generate()</code> to get outputs for the model.`,name:"predict_with_generate"}]}}),D=new We({props:{code:`from datasets import load_metric rouge_metric = load_metric("rouge") def rouge_fn(predictions, labels): decoded_predictions = tokenizer.batch_decode(predictions, skip_special_tokens=True) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) result = rouge_metric.compute(predictions=decoded_predictions, references=decoded_labels) return {key: value.mid.fmeasure * 100 for key, value in result.items()}`,highlighted:`<span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_metric rouge_metric = load_metric(<span class="hljs-string">&quot;rouge&quot;</span>) <span class="hljs-keyword">def</span> <span class="hljs-title function_">rouge_fn</span>(<span class="hljs-params">predictions, labels</span>): decoded_predictions = tokenizer.batch_decode(predictions, skip_special_tokens=<span class="hljs-literal">True</span>) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=<span class="hljs-literal">True</span>) result = rouge_metric.compute(predictions=decoded_predictions, references=decoded_labels) <span class="hljs-keyword">return</span> {key: value.mid.fmeasure * <span class="hljs-number">100</span> <span class="hljs-keyword">for</span> key, value <span class="hljs-keyword">in</span> result.items()}`}}),N=new We({props:{code:"{'rouge1': 37.4199, 'rouge2': 13.9768, 'rougeL': 34.361, 'rougeLsum': 35.0781",highlighted:'{&#x27;rouge1&#x27;: <span class="hljs-number">37.4199</span>, &#x27;rouge2&#x27;: <span class="hljs-number">13.9768</span>, &#x27;rougeL&#x27;: <span 
class="hljs-number">34.361</span>, &#x27;rougeLsum&#x27;: <span class="hljs-number">35.0781</span>'}}),M=new Fe({}),L=new dt({props:{name:"class transformers.PushToHubCallback",anchor:"transformers.PushToHubCallback",parameters:[{name:"output_dir",val:": typing.Union[str, pathlib.Path]"},{name:"save_strategy",val:": typing.Union[str, transformers.trainer_utils.IntervalStrategy] = 'epoch'"},{name:"save_steps",val:": typing.Optional[int] = None"},{name:"tokenizer",val:": typing.Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None"},{name:"hub_model_id",val:": typing.Optional[str] = None"},{name:"hub_token",val:": typing.Optional[str] = None"},{name:"checkpoint",val:": bool = False"},{name:"**model_card_args",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/keras_callbacks.py#L242",parametersDescription:[{anchor:"transformers.PushToHubCallback.output_dir",description:`<strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written and synced with the repository on the Hub.`,name:"output_dir"},{anchor:"transformers.PushToHubCallback.save_strategy",description:`<strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;epoch&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: Save is done at the end of training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code></li> </ul>`,name:"save_strategy"},{anchor:"transformers.PushToHubCallback.save_steps",description:`<strong>save_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of steps between saves when using the &#x201C;steps&#x201D; <code>save_strategy</code>.`,name:"save_steps"},{anchor:"transformers.PushToHubCallback.tokenizer",description:`<strong>tokenizer</strong> (<code>PreTrainedTokenizerBase</code>, <em>optional</em>) &#x2014; The tokenizer used by the model. If supplied, will be uploaded to the repo alongside the weights.`,name:"tokenizer"},{anchor:"transformers.PushToHubCallback.hub_model_id",description:`<strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <code>output_dir</code>. It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>.</p> <p>Will default to to the name of <code>output_dir</code>.`,name:"hub_model_id"},{anchor:"transformers.PushToHubCallback.hub_token",description:`<strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. 
Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.`,name:"hub_token"},{anchor:"transformers.PushToHubCallback.checkpoint",description:`<strong>checkpoint</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to save full training checkpoints (including epoch and optimizer state) to allow training to be resumed. Only usable when <code>save_strategy</code> is <code>&quot;epoch&quot;</code>.`,name:"checkpoint"}]}}),S=new We({props:{code:`from transformers.keras_callbacks import PushToHubCallback push_to_hub_callback = PushToHubCallback( output_dir="./model_save", tokenizer=tokenizer, hub_model_id="gpt5-7xlarge", ) model.fit(train_dataset, callbacks=[push_to_hub_callback])`,highlighted:`<span class="hljs-keyword">from</span> transformers.keras_callbacks <span class="hljs-keyword">import</span> PushToHubCallback push_to_hub_callback = PushToHubCallback( output_dir=<span class="hljs-string">&quot;./model_save&quot;</span>, tokenizer=tokenizer, hub_model_id=<span class="hljs-string">&quot;gpt5-7xlarge&quot;</span>, ) model.fit(train_dataset, callbacks=[push_to_hub_callback])`}}),{c(){k=o("meta"),I=c(),m=o("h1"),_=o("a"),W=o("span"),x(K.$$.fragment),me=c(),F=o("span"),fe=n("Keras callbacks"),se=c(),A=o("p"),be=n(`When training a Transformers model with Keras, there are some library-specific callbacks available to automate common tasks:`),re=c(),y=o("h2"),C=o("a"),G=o("span"),x(O.$$.fragment),_e=c(),R=o("span"),ge=n("KerasMetricCallback"),ne=c(),i=o("div"),x(j.$$.fragment),ke=c(),u=o("p"),ve=n(`Callback to compute metrics at the end of every epoch. Unlike normal Keras metrics, these do not need to be compilable by TF. It is particularly useful for common NLP metrics like BLEU and ROUGE that require string operations or generation loops that cannot be compiled. 
Predictions (or generations) will be computed on the `),V=o("code"),ye=n("eval_dataset"),we=n(" before being passed to the "),J=o("code"),$e=n("metric_fn"),Ce=n(" in "),Q=o("code"),Te=n("np.ndarray"),xe=n(" format. The "),X=o("code"),Pe=n("metric_fn"),Ee=n(` should compute metrics and return a dict mapping metric names to metric values.`),ze=c(),Y=o("p"),He=n(`We provide an example of a suitable metric_fn that computes ROUGE scores for a summarization model below. Note that this example skips some post-processing for readability and simplicity, and should probably not be used as-is!`),qe=c(),x(D.$$.fragment),Ke=c(),Z=o("p"),Oe=n("The above function will return a dict containing values which will be logged like any other Keras metric:"),je=c(),x(N.$$.fragment),le=c(),w=o("h2"),T=o("a"),ee=o("span"),x(M.$$.fragment),De=c(),te=o("span"),Ne=n("PushToHubCallback"),ie=c(),g=o("div"),x(L.$$.fragment),Me=c(),$=o("p"),Le=n(`Callback that will save and push the model to the Hub regularly. By default, it pushes once per epoch, but this can be changed with the `),ae=o("code"),Se=n("save_strategy"),Ie=n(` argument. 
Pushed models can be accessed like any other model on the hub, such as with the `),oe=o("code"),Ae=n("from_pretrained"),Ue=n(" method."),Be=c(),x(S.$$.fragment),this.h()},l(e){const h=mt('[data-svelte="svelte-1phssyn"]',document.head);k=s(h,"META",{name:!0,content:!0}),h.forEach(a),I=d(e),m=s(e,"H1",{class:!0});var de=r(m);_=s(de,"A",{id:!0,class:!0,href:!0});var Ge=r(_);W=s(Ge,"SPAN",{});var Re=r(W);P(K.$$.fragment,Re),Re.forEach(a),Ge.forEach(a),me=d(de),F=s(de,"SPAN",{});var Ve=r(F);fe=l(Ve,"Keras callbacks"),Ve.forEach(a),de.forEach(a),se=d(e),A=s(e,"P",{});var Je=r(A);be=l(Je,`When training a Transformers model with Keras, there are some library-specific callbacks available to automate common tasks:`),Je.forEach(a),re=d(e),y=s(e,"H2",{class:!0});var pe=r(y);C=s(pe,"A",{id:!0,class:!0,href:!0});var Qe=r(C);G=s(Qe,"SPAN",{});var Xe=r(G);P(O.$$.fragment,Xe),Xe.forEach(a),Qe.forEach(a),_e=d(pe),R=s(pe,"SPAN",{});var Ye=r(R);ge=l(Ye,"KerasMetricCallback"),Ye.forEach(a),pe.forEach(a),ne=d(e),i=s(e,"DIV",{class:!0});var f=r(i);P(j.$$.fragment,f),ke=d(f),u=s(f,"P",{});var v=r(u);ve=l(v,`Callback to compute metrics at the end of every epoch. Unlike normal Keras metrics, these do not need to be compilable by TF. It is particularly useful for common NLP metrics like BLEU and ROUGE that require string operations or generation loops that cannot be compiled. Predictions (or generations) will be computed on the `),V=s(v,"CODE",{});var Ze=r(V);ye=l(Ze,"eval_dataset"),Ze.forEach(a),we=l(v," before being passed to the "),J=s(v,"CODE",{});var et=r(J);$e=l(et,"metric_fn"),et.forEach(a),Ce=l(v," in "),Q=s(v,"CODE",{});var tt=r(Q);Te=l(tt,"np.ndarray"),tt.forEach(a),xe=l(v," format. 
The "),X=s(v,"CODE",{});var at=r(X);Pe=l(at,"metric_fn"),at.forEach(a),Ee=l(v,` should compute metrics and return a dict mapping metric names to metric values.`),v.forEach(a),ze=d(f),Y=s(f,"P",{});var ot=r(Y);He=l(ot,`We provide an example of a suitable metric_fn that computes ROUGE scores for a summarization model below. Note that this example skips some post-processing for readability and simplicity, and should probably not be used as-is!`),ot.forEach(a),qe=d(f),P(D.$$.fragment,f),Ke=d(f),Z=s(f,"P",{});var st=r(Z);Oe=l(st,"The above function will return a dict containing values which will be logged like any other Keras metric:"),st.forEach(a),je=d(f),P(N.$$.fragment,f),f.forEach(a),le=d(e),w=s(e,"H2",{class:!0});var he=r(w);T=s(he,"A",{id:!0,class:!0,href:!0});var rt=r(T);ee=s(rt,"SPAN",{});var nt=r(ee);P(M.$$.fragment,nt),nt.forEach(a),rt.forEach(a),De=d(he),te=s(he,"SPAN",{});var lt=r(te);Ne=l(lt,"PushToHubCallback"),lt.forEach(a),he.forEach(a),ie=d(e),g=s(e,"DIV",{class:!0});var U=r(g);P(L.$$.fragment,U),Me=d(U),$=s(U,"P",{});var B=r($);Le=l(B,`Callback that will save and push the model to the Hub regularly. By default, it pushes once per epoch, but this can be changed with the `),ae=s(B,"CODE",{});var it=r(ae);Se=l(it,"save_strategy"),it.forEach(a),Ie=l(B,` argument. 
Pushed models can be accessed like any other model on the hub, such as with the `),oe=s(B,"CODE",{});var ct=r(oe);Ae=l(ct,"from_pretrained"),ct.forEach(a),Ue=l(B," method."),B.forEach(a),Be=d(U),P(S.$$.fragment,U),U.forEach(a),this.h()},h(){p(k,"name","hf:doc:metadata"),p(k,"content",JSON.stringify(_t)),p(_,"id","keras-callbacks"),p(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(_,"href","#keras-callbacks"),p(m,"class","relative group"),p(C,"id","transformers.KerasMetricCallback"),p(C,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(C,"href","#transformers.KerasMetricCallback"),p(y,"class","relative group"),p(i,"class","docstring"),p(T,"id","transformers.PushToHubCallback"),p(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),p(T,"href","#transformers.PushToHubCallback"),p(w,"class","relative 
group"),p(g,"class","docstring")},m(e,h){t(document.head,k),b(e,I,h),b(e,m,h),t(m,_),t(_,W),E(K,W,null),t(m,me),t(m,F),t(F,fe),b(e,se,h),b(e,A,h),t(A,be),b(e,re,h),b(e,y,h),t(y,C),t(C,G),E(O,G,null),t(y,_e),t(y,R),t(R,ge),b(e,ne,h),b(e,i,h),E(j,i,null),t(i,ke),t(i,u),t(u,ve),t(u,V),t(V,ye),t(u,we),t(u,J),t(J,$e),t(u,Ce),t(u,Q),t(Q,Te),t(u,xe),t(u,X),t(X,Pe),t(u,Ee),t(i,ze),t(i,Y),t(Y,He),t(i,qe),E(D,i,null),t(i,Ke),t(i,Z),t(Z,Oe),t(i,je),E(N,i,null),b(e,le,h),b(e,w,h),t(w,T),t(T,ee),E(M,ee,null),t(w,De),t(w,te),t(te,Ne),b(e,ie,h),b(e,g,h),E(L,g,null),t(g,Me),t(g,$),t($,Le),t($,ae),t(ae,Se),t($,Ie),t($,oe),t(oe,Ae),t($,Ue),t(g,Be),E(S,g,null),ce=!0},p:ft,i(e){ce||(z(K.$$.fragment,e),z(O.$$.fragment,e),z(j.$$.fragment,e),z(D.$$.fragment,e),z(N.$$.fragment,e),z(M.$$.fragment,e),z(L.$$.fragment,e),z(S.$$.fragment,e),ce=!0)},o(e){H(K.$$.fragment,e),H(O.$$.fragment,e),H(j.$$.fragment,e),H(D.$$.fragment,e),H(N.$$.fragment,e),H(M.$$.fragment,e),H(L.$$.fragment,e),H(S.$$.fragment,e),ce=!1},d(e){a(k),e&&a(I),e&&a(m),q(K),e&&a(se),e&&a(A),e&&a(re),e&&a(y),q(O),e&&a(ne),e&&a(i),q(j),q(D),q(N),e&&a(le),e&&a(w),q(M),e&&a(ie),e&&a(g),q(L),q(S)}}}const _t={local:"keras-callbacks",sections:[{local:"transformers.KerasMetricCallback",title:"KerasMetricCallback"},{local:"transformers.PushToHubCallback",title:"PushToHubCallback"}],title:"Keras callbacks"};function gt(ue,k,I){let{fw:m}=k;return ue.$$set=_=>{"fw"in _&&I(0,m=_.fw)},[m]}class Ct extends pt{constructor(k){super();ht(this,k,gt,bt,ut,{fw:0})}}export{Ct as default,_t as metadata};
406
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/main_classes/pipelines.mdx-dd4bd714.js
import{S as PA,i as TA,s as yA,e as r,k as l,w as f,t as a,M as xA,c as o,d as n,m as p,a as s,x as h,h as i,b as c,F as e,g as m,y as u,q as g,o as _,B as v}from"../../chunks/vendor-4833417e.js";import{T as kA}from"../../chunks/Tip-fffd6df1.js";import{D as w}from"../../chunks/Docstring-4f315ed9.js";import{C as z}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as y}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function $A(vr){let x,ie,$,A,fe;return{c(){x=r("p"),ie=a(`However, this is not automatically a win for performance. It can be either a 10x speedup or 5x slowdown depending on hardware, data and the actual model being used.`),$=l(),A=r("p"),fe=a("Example where it\u2019s mostly a speedup:")},l(E){x=o(E,"P",{});var O=s(x);ie=i(O,`However, this is not automatically a win for performance. It can be either a 10x speedup or 5x slowdown depending on hardware, data and the actual model being used.`),O.forEach(n),$=p(E),A=o(E,"P",{});var Te=s(A);fe=i(Te,"Example where it\u2019s mostly a speedup:"),Te.forEach(n)},m(E,O){m(E,x,O),e(x,ie),m(E,$,O),m(E,A,O),e(A,fe)},d(E){E&&n(x),E&&n($),E&&n(A)}}}function EA(vr){let x,ie,$,A,fe;return{c(){x=r("p"),ie=a(`This pipeline only works for inputs with exactly one token masked. Experimental: We added support for multiple masks. The returned values are raw model output, and correspond to disjoint probabilities where one might expect joint probabilities (See `),$=r("a"),A=a("discussion"),fe=a(")."),this.h()},l(E){x=o(E,"P",{});var O=s(x);ie=i(O,`This pipeline only works for inputs with exactly one token masked. Experimental: We added support for multiple masks. 
The returned values are raw model output, and correspond to disjoint probabilities where one might expect joint probabilities (See `),$=o(O,"A",{href:!0,rel:!0});var Te=s($);A=i(Te,"discussion"),Te.forEach(n),fe=i(O,")."),O.forEach(n),this.h()},h(){c($,"href","https://github.com/huggingface/transformers/pull/10222"),c($,"rel","nofollow")},m(E,O){m(E,x,O),e(x,ie),e(x,$),e($,A),e(x,fe)},d(E){E&&n(x)}}}function qA(vr){let x,ie,$,A,fe,E,O,Te,Dg,Vm,Qt,Ig,Ca,jg,Sg,Bm,za,Mg,Zm,Vt,yl,wr,Fg,Da,Lg,Ug,Gg,br,xl,Ng,Og,b,$l,Ia,Rg,Hg,El,ja,Wg,Qg,ql,Sa,Vg,Bg,Al,Ma,Zg,Yg,Cl,Fa,Xg,Kg,zl,La,Jg,e_,Dl,Ua,t_,n_,Il,Ga,r_,o_,jl,Na,s_,a_,Sl,Oa,i_,l_,Ml,Ra,p_,c_,Fl,Ha,d_,m_,Ll,Wa,f_,h_,Ul,Qa,u_,g_,Gl,Va,__,v_,Nl,Ba,w_,b_,Ol,Za,k_,P_,Rl,Ya,T_,Ym,Ve,Bt,Hl,kr,y_,Wl,x_,Xm,Zt,$_,Ql,E_,q_,Km,Xa,A_,Jm,Pr,ef,Yt,C_,Tr,z_,D_,tf,yr,nf,Xt,I_,Vl,j_,S_,rf,xr,of,Kt,M_,Bl,F_,L_,sf,$r,af,Ka,U_,lf,Er,pf,R,qr,G_,Ar,N_,Ja,O_,R_,H_,Zl,W_,Q_,Be,Cr,V_,ei,B_,Z_,Y_,zr,X_,ti,K_,J_,e1,Yl,t1,n1,Xl,r1,o1,Dr,cf,Ze,Jt,Kl,Ir,s1,Jl,a1,df,qe,i1,ep,l1,p1,tp,c1,d1,mf,jr,ff,en,hf,Sr,uf,Mr,gf,ni,m1,_f,Fr,vf,tn,f1,np,h1,u1,wf,Lr,bf,ri,g1,kf,oi,_1,Pf,le,rp,op,sp,v1,w1,ap,ip,b1,k1,lp,pp,P1,T1,Ur,cp,y1,x1,Ye,dp,$1,E1,mp,q1,A1,fp,C1,z1,hp,up,D1,Tf,Xe,nn,gp,Gr,I1,_p,j1,yf,ye,vp,S1,M1,wp,F1,L1,bp,U1,G1,xf,Ae,N1,kp,O1,R1,Pp,H1,W1,$f,Nr,Ef,si,Q1,qf,Or,Af,ai,V1,Cf,rn,B1,Tp,Z1,Y1,zf,Ke,on,yp,Rr,X1,xp,K1,Df,ii,J1,If,sn,ev,$p,tv,nv,jf,li,rv,Sf,pi,Ep,ov,Mf,Hr,Ff,ci,sv,Lf,Je,an,qp,Wr,av,Ap,iv,Uf,di,mi,lv,Gf,et,ln,Cp,Qr,pv,zp,cv,Nf,tt,pn,Dp,Vr,dv,Ip,mv,Of,J,Br,fv,Zr,hv,jp,uv,gv,_v,nt,vv,fi,wv,bv,Sp,kv,Pv,Tv,Yr,yv,Xr,xv,$v,Ev,cn,Kr,qv,Jr,Av,hi,Cv,zv,Rf,rt,dn,Mp,eo,Dv,Fp,Iv,Hf,he,to,jv,Lp,Sv,Mv,Up,Fv,Lv,mn,no,Uv,ro,Gv,ui,Nv,Ov,Wf,ot,fn,Gp,oo,Rv,Np,Hv,Qf,D,so,Wv,xe,Qv,gi,Vv,Bv,_i,Zv,Yv,Op,Xv,Kv,Jv,Rp,ew,tw,ao,nw,hn,io,rw,lo,ow,Hp,sw,aw,iw,un,po,lw,Wp,pw,cw,Ce,co,dw,Qp,mw,fw,ue,hw,Vp,uw,gw,Bp,_w,vw,Zp,ww,bw,Yp,kw,Pw,Tw,gn,mo,yw,$e,xw,Xp,$w,Ew,Kp,qw,Aw,Jp,Cw,zw,Vf,M,fo,Dw,ec,Iw,jw,st,Sw,vi,Mw,Fw,tc,Lw,Uw,Gw,ge,Nw,nc,Ow,Rw,rc,Hw,Ww,oc,Qw,Vw,ho,Bw,Zw,
Yw,sc,Xw,Kw,uo,Jw,_n,go,eb,ac,tb,Bf,at,vn,ic,_o,nb,lc,rb,Zf,ee,vo,ob,pc,sb,ab,it,ib,wi,lb,pb,cc,cb,db,mb,wo,fb,bo,hb,ub,gb,wn,ko,_b,dc,vb,Yf,lt,bn,mc,Po,wb,fc,bb,Xf,H,To,kb,pt,Pb,hc,Tb,yb,bi,xb,$b,Eb,ct,qb,ki,Ab,Cb,uc,zb,Db,Ib,yo,jb,xo,Sb,Mb,Fb,kn,Lb,Pn,$o,Ub,gc,Gb,Kf,dt,Tn,_c,Eo,Nb,vc,Ob,Jf,te,qo,Rb,Ao,Hb,wc,Wb,Qb,Vb,mt,Bb,Pi,Zb,Yb,bc,Xb,Kb,Jb,Co,ek,zo,tk,nk,rk,yn,Do,ok,kc,sk,eh,ft,xn,Pc,Io,ak,Tc,ik,th,ne,jo,lk,So,pk,yc,ck,dk,mk,ht,fk,Ti,hk,uk,xc,gk,_k,vk,Mo,wk,Fo,bk,kk,Pk,$n,Lo,Tk,$c,yk,nh,ut,En,Ec,Uo,xk,qc,$k,rh,I,Go,Ek,gt,qk,Ac,Ak,Ck,yi,zk,Dk,Ik,_t,jk,xi,Sk,Mk,Cc,Fk,Lk,Uk,No,Gk,Oo,Nk,Ok,Rk,ze,Ro,Hk,zc,Wk,Qk,Dc,Vk,Bk,qn,Ho,Zk,Ic,Yk,Xk,An,Wo,Kk,jc,Jk,eP,Cn,Qo,tP,Sc,nP,oh,zn,rP,$i,oP,sP,sh,vt,Dn,Mc,Vo,aP,Fc,iP,ah,re,Bo,lP,Zo,pP,Lc,cP,dP,mP,wt,fP,Ei,hP,uP,Uc,gP,_P,vP,Yo,wP,Xo,bP,kP,PP,In,Ko,TP,Gc,yP,ih,bt,jn,Nc,Jo,xP,Oc,$P,lh,j,es,EP,kt,qP,Rc,AP,CP,qi,zP,DP,IP,Pt,jP,Ai,SP,MP,Hc,FP,LP,UP,ts,GP,ns,NP,OP,RP,Sn,rs,HP,Wc,WP,QP,De,os,VP,Mn,BP,Qc,ZP,YP,Vc,XP,KP,Bc,JP,eT,Ie,ss,tT,as,nT,Zc,rT,oT,sT,Yc,aT,iT,Fn,is,lT,Xc,pT,ph,Tt,Ln,Kc,ls,cT,Jc,dT,ch,F,ps,mT,ed,fT,hT,yt,uT,Ci,gT,_T,td,vT,wT,bT,L,kT,nd,PT,TT,rd,yT,xT,od,$T,ET,sd,qT,AT,ad,CT,zT,id,DT,IT,cs,jT,ST,MT,ld,FT,LT,ds,UT,Un,ms,GT,pd,NT,dh,xt,Gn,cd,fs,OT,dd,RT,mh,oe,hs,HT,us,WT,md,QT,VT,BT,$t,ZT,zi,YT,XT,fd,KT,JT,ey,gs,ty,_s,ny,ry,oy,C,vs,sy,hd,ay,iy,W,ud,gd,ly,py,_d,vd,cy,dy,wd,bd,my,fy,kd,Pd,hy,uy,Td,yd,gy,_y,xd,$d,vy,wy,Ed,qd,by,ky,ws,Py,Ad,Ty,yy,xy,Cd,$y,Ey,bs,qy,zd,Ay,Cy,Dd,zy,Dy,ks,fh,Et,Nn,Id,Ps,Iy,jd,jy,hh,Q,Ts,Sy,qt,My,Sd,Fy,Ly,Di,Uy,Gy,Ny,At,Oy,Ii,Ry,Hy,Md,Wy,Qy,Vy,ys,By,Fd,Zy,Yy,Xy,xs,Ky,$s,Jy,e2,t2,On,Es,n2,Ld,r2,uh,Ct,Rn,Ud,qs,o2,Gd,s2,gh,se,As,a2,Cs,i2,Nd,l2,p2,c2,zt,d2,ji,m2,f2,Od,h2,u2,g2,zs,_2,Ds,v2,w2,b2,Hn,Is,k2,Rd,P2,_h,Dt,Wn,Hd,js,T2,Wd,y2,vh,S,Ss,x2,Qd,$2,E2,It,q2,Si,A2,C2,Vd,z2,D2,I2,Ms,j2,Fs,S2,M2,F2,Bd,L2,U2,Ls,G2,Qn,Us,N2,Zd,O2,R2,Vn,Gs,H2,Yd,W2,wh,jt,Bn,Xd,Ns,Q2,Kd,V2,bh,q,Os,B2,St,Z2,Jd,Y2,X2,Mi,K2,J2,e4,Mt,t4,Fi,n4,r4,em,o4,s4,a4,Rs,i4,Hs,l4,p4,c4,Zn,Ws,d4,tm,m4,f4,je,Qs,h4,nm,u4,g4,rm,_4,v4,
Yn,Vs,w4,om,b4,k4,Xn,Bs,P4,sm,T4,y4,Kn,Zs,x4,am,$4,kh,Ft,Jn,im,Ys,E4,lm,q4,Ph,U,Xs,A4,pm,C4,z4,Lt,D4,Li,I4,j4,cm,S4,M4,F4,Ks,L4,Js,U4,G4,N4,dm,O4,R4,ea,H4,er,ta,W4,mm,Q4,Th,Ut,tr,fm,na,V4,hm,B4,yh,V,ra,Z4,oa,Y4,um,X4,K4,J4,Ee,e0,gm,t0,n0,_m,r0,o0,vm,s0,a0,i0,Gt,l0,Ui,p0,c0,wm,d0,m0,f0,sa,h0,aa,u0,g0,_0,nr,ia,v0,la,w0,Gi,b0,k0,xh,Nt,rr,bm,pa,P0,km,T0,$h,ae,ca,y0,Ot,x0,Pm,$0,E0,Tm,q0,A0,C0,Rt,z0,Ni,D0,I0,ym,j0,S0,M0,da,F0,ma,L0,U0,G0,or,fa,N0,xm,O0,Eh,Ht,sr,$m,ha,R0,Oi,H0,Em,W0,qh,P,ua,Q0,qm,V0,B0,Am,Z0,Y0,Cm,X0,K0,zm,J0,ex,_e,tx,Ri,nx,rx,Dm,ox,sx,Im,ax,ix,jm,lx,px,cx,ar,ga,dx,Sm,mx,fx,ve,_a,hx,Mm,ux,gx,Fm,_x,vx,va,wx,ir,wa,bx,Lm,kx,Px,lr,ba,Tx,ka,yx,Um,xx,$x,Ex,pr,Pa,qx,Ta,Ax,Gm,Cx,zx,Dx,cr,ya,Ix,Wt,jx,Nm,Sx,Mx,Om,Fx,Lx,Ux,dr,xa,Gx,Rm,Nx,Ox,mr,$a,Rx,Ea,Hx,Hm,Wx,Qx,Ah;return E=new y({}),kr=new y({}),Pr=new z({props:{code:`pipe = pipeline("text-classification") pipe("This restaurant is awesome")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe(<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}]`}}),yr=new z({props:{code:`pipe = pipeline(model="roberta-large-mnli") pipe("This restaurant is awesome")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(model=<span class="hljs-string">&quot;roberta-large-mnli&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe(<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}]`}}),xr=new 
z({props:{code:`pipe = pipeline("text-classification") pipe(["This restaurant is awesome", "This restaurant is aweful"])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe([<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>, <span class="hljs-string">&quot;This restaurant is aweful&quot;</span>]) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;NEGATIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9996669292449951</span>}]`}}),$r=new z({props:{code:`import datasets from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset from tqdm.auto import tqdm pipe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0) dataset = datasets.load_dataset("superb", name="asr", split="test") # KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item # as we're not interested in the *target* part of the dataset. 
for out in tqdm(pipe(KeyDataset(dataset, "file"))): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # ....`,highlighted:`<span class="hljs-keyword">import</span> datasets <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> transformers.pipelines.pt_utils <span class="hljs-keyword">import</span> KeyDataset <span class="hljs-keyword">from</span> tqdm.auto <span class="hljs-keyword">import</span> tqdm pipe = pipeline(<span class="hljs-string">&quot;automatic-speech-recognition&quot;</span>, model=<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>, device=<span class="hljs-number">0</span>) dataset = datasets.load_dataset(<span class="hljs-string">&quot;superb&quot;</span>, name=<span class="hljs-string">&quot;asr&quot;</span>, split=<span class="hljs-string">&quot;test&quot;</span>) <span class="hljs-comment"># KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item</span> <span class="hljs-comment"># as we&#x27;re not interested in the *target* part of the dataset.</span> <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(KeyDataset(dataset, <span class="hljs-string">&quot;file&quot;</span>))): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># {&quot;text&quot;: &quot;NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND&quot;}</span> <span class="hljs-comment"># {&quot;text&quot;: ....}</span> <span class="hljs-comment"># ....</span>`}}),Er=new z({props:{code:`from transformers import pipeline pipe = pipeline("text-classification") def data(): while True: # This could come from a dataset, a database, a queue or HTTP request # in a server # Caveat: because this is iterative, you cannot use \`num_workers > 1\` variable # to use multiple threads to preprocess data. 
You can still have 1 thread that # does the preprocessing while the main runs the big inference yield "This is a test" for out in pipe(data()): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # ....`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-keyword">def</span> <span class="hljs-title function_">data</span>(): <span class="hljs-keyword">while</span> <span class="hljs-literal">True</span>: <span class="hljs-comment"># This could come from a dataset, a database, a queue or HTTP request</span> <span class="hljs-comment"># in a server</span> <span class="hljs-comment"># Caveat: because this is iterative, you cannot use \`num_workers &gt; 1\` variable</span> <span class="hljs-comment"># to use multiple threads to preprocess data. You can still have 1 thread that</span> <span class="hljs-comment"># does the preprocessing while the main runs the big inference</span> <span class="hljs-keyword">yield</span> <span class="hljs-string">&quot;This is a test&quot;</span> <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> pipe(data()): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># {&quot;text&quot;: &quot;NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND&quot;}</span> <span class="hljs-comment"># {&quot;text&quot;: ....}</span> <span class="hljs-comment"># ....</span>`}}),qr=new w({props:{name:"transformers.pipeline",anchor:"transformers.pipeline",parameters:[{name:"task",val:": str = None"},{name:"model",val:": typing.Optional = None"},{name:"config",val:": typing.Union[str, transformers.configuration_utils.PretrainedConfig, NoneType] = None"},{name:"tokenizer",val:": typing.Union[str, transformers.tokenization_utils.PreTrainedTokenizer, NoneType] = None"},{name:"feature_extractor",val:": 
typing.Union[str, ForwardRef('SequenceFeatureExtractor'), NoneType] = None"},{name:"framework",val:": typing.Optional[str] = None"},{name:"revision",val:": typing.Optional[str] = None"},{name:"use_fast",val:": bool = True"},{name:"use_auth_token",val:": typing.Union[str, bool, NoneType] = None"},{name:"model_kwargs",val:": typing.Dict[str, typing.Any] = None"},{name:"pipeline_class",val:": typing.Optional[typing.Any] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines.py#L373",parametersDescription:[{anchor:"transformers.pipeline.task",description:`<strong>task</strong> (<code>str</code>) &#x2014; The task defining which pipeline will be returned. Currently accepted tasks are:</p> <ul> <li><code>&quot;audio-classification&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AudioClassificationPipeline">AudioClassificationPipeline</a>.</li> <li><code>&quot;automatic-speech-recognition&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline">AutomaticSpeechRecognitionPipeline</a>.</li> <li><code>&quot;conversational&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ConversationalPipeline">ConversationalPipeline</a>.</li> <li><code>&quot;feature-extraction&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.FeatureExtractionPipeline">FeatureExtractionPipeline</a>.</li> <li><code>&quot;fill-mask&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.FillMaskPipeline">FillMaskPipeline</a>:.</li> <li><code>&quot;image-classification&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ImageClassificationPipeline">ImageClassificationPipeline</a>.</li> 
<li><code>&quot;question-answering&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.QuestionAnsweringPipeline">QuestionAnsweringPipeline</a>.</li> <li><code>&quot;table-question-answering&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TableQuestionAnsweringPipeline">TableQuestionAnsweringPipeline</a>.</li> <li><code>&quot;text2text-generation&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Text2TextGenerationPipeline">Text2TextGenerationPipeline</a>.</li> <li><code>&quot;text-classification&quot;</code> (alias <code>&quot;sentiment-analysis&quot;</code> available): will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TextClassificationPipeline">TextClassificationPipeline</a>.</li> <li><code>&quot;text-generation&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TextGenerationPipeline">TextGenerationPipeline</a>:.</li> <li><code>&quot;token-classification&quot;</code> (alias <code>&quot;ner&quot;</code> available): will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TokenClassificationPipeline">TokenClassificationPipeline</a>.</li> <li><code>&quot;translation&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TranslationPipeline">TranslationPipeline</a>.</li> <li><code>&quot;translation_xx_to_yy&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TranslationPipeline">TranslationPipeline</a>.</li> <li><code>&quot;summarization&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.SummarizationPipeline">SummarizationPipeline</a>.</li> <li><code>&quot;zero-shot-classification&quot;</code>: will return a <a 
href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline">ZeroShotClassificationPipeline</a>.</li> </ul>`,name:"task"},{anchor:"transformers.pipeline.model",description:`<strong>model</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>, <em>optional</em>) &#x2014; The model that will be used by the pipeline to make predictions. This can be a model identifier or an actual instance of a pretrained model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> (for PyTorch) or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> (for TensorFlow).</p> <p>If not provided, the default for the <code>task</code> will be loaded.`,name:"model"},{anchor:"transformers.pipeline.config",description:`<strong>config</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; The configuration that will be used by the pipeline to instantiate the model. This can be a model identifier or an actual pretrained model configuration inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>.</p> <p>If not provided, the default configuration file for the requested model will be used. That means that if <code>model</code> is given, its default configuration will be used. 
However, if <code>model</code> is not supplied, this <code>task</code>&#x2019;s default model&#x2019;s config is used instead.`,name:"config"},{anchor:"transformers.pipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>, <em>optional</em>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained tokenizer inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.</p> <p>If not provided, the default tokenizer for the given <code>model</code> will be loaded (if it is a string). If <code>model</code> is not specified or not a string, then the default tokenizer for <code>config</code> is loaded (if it is a string). However, if <code>config</code> is also not given or not a string, then the default tokenizer for the given <code>task</code> will be loaded.`,name:"tokenizer"},{anchor:"transformers.pipeline.feature_extractor",description:`<strong>feature_extractor</strong> (<code>str</code> or <code>PreTrainedFeatureExtractor</code>, <em>optional</em>) &#x2014; The feature extractor that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained feature extractor inheriting from <code>PreTrainedFeatureExtractor</code>.</p> <p>Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal models. Multi-modal models will also require a tokenizer to be passed.</p> <p>If not provided, the default feature extractor for the given <code>model</code> will be loaded (if it is a string). If <code>model</code> is not specified or not a string, then the default feature extractor for <code>config</code> is loaded (if it is a string). 
However, if <code>config</code> is also not given or not a string, then the default feature extractor for the given <code>task</code> will be loaded.`,name:"feature_extractor"},{anchor:"transformers.pipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.pipeline.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; When passing a task name or a string model identifier: The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.`,name:"revision"},{anchor:"transformers.pipeline.use_fast",description:`<strong>use_fast</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a Fast tokenizer if possible (a <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>).`,name:"use_fast"},{anchor:"transformers.pipeline.use_auth_token",description:`<strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). 
model_kwargs &#x2014; Additional dictionary of keyword arguments passed along to the model&#x2019;s <code>from_pretrained(..., **model_kwargs)</code> function. kwargs &#x2014; Additional keyword arguments passed along to the specific pipeline init (see the documentation for the corresponding pipeline class for possible values).`,name:"use_auth_token"}],returnDescription:` <p>A suitable pipeline for the task.</p> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Pipeline" >Pipeline</a></p> `}}),Dr=new z({props:{code:`from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer # Sentiment analysis pipeline pipeline("sentiment-analysis") # Question answering pipeline, specifying the checkpoint identifier pipeline("question-answering", model="distilbert-base-cased-distilled-squad", tokenizer="bert-base-cased") # Named entity recognition pipeline, passing in a specific model and tokenizer model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") pipeline("ner", model=model, tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline, AutoModelForTokenClassification, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Sentiment analysis pipeline</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Question answering pipeline, specifying the checkpoint identifier</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;question-answering&quot;</span>, model=<span class="hljs-string">&quot;distilbert-base-cased-distilled-squad&quot;</span>, tokenizer=<span 
class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Named entity recognition pipeline, passing in a specific model and tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;dbmdz/bert-large-cased-finetuned-conll03-english&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;ner&quot;</span>, model=model, tokenizer=tokenizer)`}}),Ir=new y({}),jr=new z({props:{code:`from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset import datasets dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised") pipe = pipeline("text-classification", device=0) for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"): print(out) # [{'label': 'POSITIVE', 'score': 0.9998743534088135}] # Exactly the same output as before, but the content are passed # as batches to the model`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> transformers.pipelines.pt_utils <span class="hljs-keyword">import</span> KeyDataset <span class="hljs-keyword">import</span> datasets dataset = datasets.load_dataset(<span class="hljs-string">&quot;imdb&quot;</span>, name=<span class="hljs-string">&quot;plain_text&quot;</span>, split=<span class="hljs-string">&quot;unsupervised&quot;</span>) pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>, device=<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> pipe(KeyDataset(dataset, <span class="hljs-string">&quot;text&quot;</span>), 
batch_size=<span class="hljs-number">8</span>, truncation=<span class="hljs-string">&quot;only_first&quot;</span>): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># [{&#x27;label&#x27;: &#x27;POSITIVE&#x27;, &#x27;score&#x27;: 0.9998743534088135}]</span> <span class="hljs-comment"># Exactly the same output as before, but the content are passed</span> <span class="hljs-comment"># as batches to the model</span>`}}),en=new kA({props:{warning:"&lcub;true}",$$slots:{default:[$A]},$$scope:{ctx:vr}}}),Sr=new z({props:{code:`from transformers import pipeline from torch.utils.data import Dataset from tqdm.auto import tqdm pipe = pipeline("text-classification", device=0) class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): return "This is a test" dataset = MyDataset() for batch_size in [1, 8, 64, 256]: print("-" * 30) print(f"Streaming batch_size={batch_size}") for out in tqdm(pipe(dataset, batch_size=batch_size), total=len(dataset)): pass`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> torch.utils.data <span class="hljs-keyword">import</span> Dataset <span class="hljs-keyword">from</span> tqdm.auto <span class="hljs-keyword">import</span> tqdm pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>, device=<span class="hljs-number">0</span>) <span class="hljs-keyword">class</span> <span class="hljs-title class_">MyDataset</span>(<span class="hljs-title class_ inherited__">Dataset</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">__len__</span>(<span class="hljs-params">self</span>): <span class="hljs-keyword">return</span> <span class="hljs-number">5000</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__getitem__</span>(<span class="hljs-params">self, i</span>): <span class="hljs-keyword">return</span> <span 
class="hljs-string">&quot;This is a test&quot;</span> dataset = MyDataset() <span class="hljs-keyword">for</span> batch_size <span class="hljs-keyword">in</span> [<span class="hljs-number">1</span>, <span class="hljs-number">8</span>, <span class="hljs-number">64</span>, <span class="hljs-number">256</span>]: <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;-&quot;</span> * <span class="hljs-number">30</span>) <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Streaming batch_size=<span class="hljs-subst">{batch_size}</span>&quot;</span>) <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(dataset, batch_size=batch_size), total=<span class="hljs-built_in">len</span>(dataset)): <span class="hljs-keyword">pass</span>`}}),Mr=new z({props:{code:`# On GTX 970 ------------------------------ Streaming no batching 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:26<00:00, 187.52it/s] ------------------------------ Streaming batch_size=8 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:04<00:00, 1205.95it/s] ------------------------------ Streaming batch_size=64 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:02<00:00, 2478.24it/s] ------------------------------ Streaming batch_size=256 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:01<00:00, 2554.43it/s] (diminishing returns, saturated the GPU)`,highlighted:`<span class="hljs-section"># On GTX 970 ------------------------------</span> Streaming no batching 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:26&lt;00:00, 187.52it/s] <span class="hljs-code">------------------------------ Streaming batch_size=8 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:04&lt;00:00, 1205.95it/s] ------------------------------</span> Streaming batch<span class="hljs-emphasis">_size=64 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:02&lt;00:00, 2478.24it/s] ------------------------------ Streaming batch_</span>size=256 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 5000/5000 [00:01&lt;00:00, 2554.43it/s] (diminishing returns, saturated the GPU)`}}),Fr=new z({props:{code:`class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): if i % 64 == 0: n = 100 else: n = 1 return "This is a test" * n`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">MyDataset</span>(<span class="hljs-title class_ inherited__">Dataset</span>): <span class="hljs-keyword">def</span> <span class="hljs-title 
function_">__len__</span>(<span class="hljs-params">self</span>): <span class="hljs-keyword">return</span> <span class="hljs-number">5000</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__getitem__</span>(<span class="hljs-params">self, i</span>): <span class="hljs-keyword">if</span> i % <span class="hljs-number">64</span> == <span class="hljs-number">0</span>: n = <span class="hljs-number">100</span> <span class="hljs-keyword">else</span>: n = <span class="hljs-number">1</span> <span class="hljs-keyword">return</span> <span class="hljs-string">&quot;This is a test&quot;</span> * n`}}),Lr=new z({props:{code:`------------------------------ Streaming no batching 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1000/1000 [00:05<00:00, 183.69it/s] ------------------------------ Streaming batch_size=8 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1000/1000 [00:03<00:00, 265.74it/s] ------------------------------ Streaming batch_size=64 
100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1000/1000 [00:26<00:00, 37.80it/s] ------------------------------ Streaming batch_size=256 0%| | 0/1000 [00:00<?, ?it/s] Traceback (most recent call last): File "/home/nicolas/src/transformers/test.py", line 42, in <module> for out in tqdm(pipe(dataset, batch_size=256), total=len(dataset)): .... q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) RuntimeError: CUDA out of memory. Tried to allocate 376.00 MiB (GPU 0; 3.95 GiB total capacity; 1.72 GiB already allocated; 354.88 MiB free; 2.46 GiB reserved in total by PyTorch)`,highlighted:`<span class="hljs-comment">------------------------------</span> Streaming no batching <span class="hljs-number">100</span>%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| <span class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">05</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">183.69</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">8</span> <span 
class="hljs-number">100</span>%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| <span class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">03</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">265.74</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">64</span> <span class="hljs-number">100</span>%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| <span class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">26</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">37.80</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">256</span> <span class="hljs-number">0</span>%| | <span class="hljs-number">0</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">00</span><span class="hljs-meta">&lt;?</span>, ?<span 
class="hljs-keyword">it</span>/s] Traceback (most recent call <span class="hljs-keyword">last</span>): File <span class="hljs-string">&quot;/home/nicolas/src/transformers/test.py&quot;</span>, <span class="hljs-built_in">line</span> <span class="hljs-number">42</span>, <span class="hljs-keyword">in</span> &lt;module&gt; <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(dataset, batch_size=<span class="hljs-number">256</span>), total=<span class="hljs-built_in">len</span>(dataset)): .... q = q / math.<span class="hljs-built_in">sqrt</span>(dim_per_head) <span class="hljs-comment"># (bs, n_heads, q_length, dim_per_head)</span> RuntimeError: CUDA out <span class="hljs-keyword">of</span> memory. Tried <span class="hljs-built_in">to</span> allocate <span class="hljs-number">376.00</span> MiB (GPU <span class="hljs-number">0</span>; <span class="hljs-number">3.95</span> GiB total capacity; <span class="hljs-number">1.72</span> GiB already allocated; <span class="hljs-number">354.88</span> MiB free; <span class="hljs-number">2.46</span> GiB reserved <span class="hljs-keyword">in</span> total <span class="hljs-keyword">by</span> PyTorch)`}}),Gr=new y({}),Nr=new z({props:{code:`preprocessed = pipe.preprocess(inputs) model_outputs = pipe.forward(preprocessed) outputs = pipe.postprocess(model_outputs)`,highlighted:`preprocessed = pipe.preprocess(inputs) model_outputs = pipe.forward(preprocessed) outputs = pipe.postprocess(model_outputs)`}}),Or=new z({props:{code:`all_model_outputs = [] for preprocessed in pipe.preprocess(inputs): model_outputs = pipe.forward(preprocessed) all_model_outputs.append(model_outputs) outputs = pipe.postprocess(all_model_outputs)`,highlighted:`all_model_outputs = [] <span class="hljs-keyword">for</span> preprocessed <span class="hljs-keyword">in</span> pipe.preprocess(inputs): model_outputs = pipe.forward(preprocessed) all_model_outputs.append(model_outputs) outputs = 
pipe.postprocess(all_model_outputs)`}}),Rr=new y({}),Hr=new z({props:{code:`class MyPipeline(TextClassificationPipeline): def postprocess(): # Your code goes here scores = scores * 100 # And here my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...) # or if you use *pipeline* function, then: my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline)`,highlighted:`<span class="hljs-keyword">class</span> <span class="hljs-title class_">MyPipeline</span>(<span class="hljs-title class_ inherited__">TextClassificationPipeline</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">postprocess</span>(): <span class="hljs-comment"># Your code goes here</span> scores = scores * <span class="hljs-number">100</span> <span class="hljs-comment"># And here</span> my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...) <span class="hljs-comment"># or if you use *pipeline* function, then:</span> my_pipeline = pipeline(model=<span class="hljs-string">&quot;xxxx&quot;</span>, pipeline_class=MyPipeline)`}}),Wr=new y({}),Qr=new y({}),Vr=new y({}),Br=new w({props:{name:"class transformers.AudioClassificationPipeline",anchor:"transformers.AudioClassificationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/audio_classification.py#L67",parametersDescription:[{anchor:"transformers.AudioClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.AudioClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.AudioClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.AudioClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.AudioClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.AudioClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.AudioClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.AudioClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.AudioClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.AudioClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),Kr=new w({props:{name:"__call__",anchor:"transformers.AudioClassificationPipeline.__call__",parameters:[{name:"inputs",val:": typing.Union[numpy.ndarray, bytes, str]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/audio_classification.py#L90",parametersDescription:[{anchor:"transformers.AudioClassificationPipeline.__call__.inputs",description:`<strong>inputs</strong> (<code>np.ndarray</code> or <code>bytes</code> or <code>str</code>) &#x2014; The inputs is either a raw waveform (<code>np.ndarray</code> of shape (n, ) of type <code>np.float32</code> or <code>np.float64</code>) at the correct sampling rate (no further check will be done) or a <code>str</code> that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using <em>ffmpeg</em>. This requires <em>ffmpeg</em> to be installed on the system. If <em>inputs</em> is <code>bytes</code> it is supposed to be the content of an audio file and is interpreted by <em>ffmpeg</em> in the same way.`,name:"inputs"},{anchor:"transformers.AudioClassificationPipeline.__call__.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; The number of top labels that will be returned by the pipeline. 
If the provided number is <code>None</code> or higher than the number of labels available in the model configuration, it will default to the number of labels.`,name:"top_k"}],returnDescription:` <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The label predicted.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The corresponding probability.</li> </ul> `,returnType:` <p>A list of <code>dict</code> with the following keys</p> `}}),eo=new y({}),to=new w({props:{name:"class transformers.AutomaticSpeechRecognitionPipeline",anchor:"transformers.AutomaticSpeechRecognitionPipeline",parameters:[{name:"feature_extractor",val:": typing.Union[ForwardRef('SequenceFeatureExtractor'), str]"},{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/automatic_speech_recognition.py#L69"}}),no=new w({props:{name:"__call__",anchor:"transformers.AutomaticSpeechRecognitionPipeline.__call__",parameters:[{name:"inputs",val:": typing.Union[numpy.ndarray, bytes, str]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/automatic_speech_recognition.py#L127",parametersDescription:[{anchor:"transformers.AutomaticSpeechRecognitionPipeline.__call__.inputs",description:`<strong>inputs</strong> (<code>np.ndarray</code> or <code>bytes</code> or <code>str</code> or <code>dict</code>) &#x2014; The inputs is either :<ul> <li><code>str</code> that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using <em>ffmpeg</em>. 
This requires <em>ffmpeg</em> to be installed on the system.</li> <li><code>bytes</code> it is supposed to be the content of an audio file and is interpreted by <em>ffmpeg</em> in the same way.</li> <li>(<code>np.ndarray</code> of shape (n, ) of type <code>np.float32</code> or <code>np.float64</code>) Raw audio at the correct sampling rate (no further check will be done)</li> <li><code>dict</code> form can be used to pass raw audio sampled at arbitrary <code>sampling_rate</code> and let this pipeline do the resampling. The dict must be in the format <code>{&quot;sampling_rate&quot;: int, &quot;raw&quot;: np.array}</code> with optionally a <code>&quot;stride&quot;: (left: int, right: int)</code> than can ask the pipeline to treat the first <code>left</code> samples and last <code>right</code> samples to be ignored in decoding (but used at inference to provide more context to the model). Only use <code>stride</code> with CTC models.</li> </ul>`,name:"inputs"},{anchor:"transformers.AutomaticSpeechRecognitionPipeline.__call__.return_timestamps",description:`<strong>return_timestamps</strong> (<em>optional</em>, <code>str</code>) &#x2014; Only available for pure CTC models. If set to <code>&quot;char&quot;</code>, the pipeline will return <code>timestamps</code> along the text for every character in the text. For instance if you get <code>[{&quot;text&quot;: &quot;h&quot;, &quot;timestamps&quot;: (0.5,0.6), {&quot;text&quot;: &quot;i&quot;, &quot;timestamps&quot;: (0.7, .9)}]</code>, then it means the model predicts that the letter &#x201C;h&#x201D; was pronounced after <code>0.5</code> and before <code>0.6</code> seconds. If set to <code>&quot;word&quot;</code>, the pipeline will return <code>timestamps</code> along the text for every word in the text. 
For instance if you get <code>[{&quot;text&quot;: &quot;hi &quot;, &quot;timestamps&quot;: (0.5,0.9), {&quot;text&quot;: &quot;there&quot;, &quot;timestamps&quot;: (1.0, .1.5)}]</code>, then it means the model predicts that the word &#x201C;hi&#x201D; was pronounces before 0.5 and after 0.9 seconds.`,name:"return_timestamps"}],returnDescription:` <p>A dictionary with the following keys:</p> <ul> <li><strong>text</strong> (<code>str</code> ) \u2014 The recognized text.</li> <li><strong>chunks</strong> (<em>optional(, <code>List[Dict]</code>) When using <code>return_timestamps</code>, the <code>chunks</code> will become a list containing all the various text chunks identified by the model, </em>e.g.* <code>[&#123;"text": "hi ", "timestamps": (0.5,0.9), &#123;"text": "there", "timestamps": (1.0, 1.5)&#125;]</code>. The original full text can roughly be recovered by doing <code>"".join(chunk["text"] for chunk in output["chunks"])</code>.</li> </ul> `,returnType:` <p><code>Dict</code></p> `}}),oo=new y({}),so=new w({props:{name:"class transformers.Conversation",anchor:"transformers.Conversation",parameters:[{name:"text",val:": str = None"},{name:"conversation_id",val:": UUID = None"},{name:"past_user_inputs",val:" = None"},{name:"generated_responses",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L19",parametersDescription:[{anchor:"transformers.Conversation.text",description:`<strong>text</strong> (<code>str</code>, <em>optional</em>) &#x2014; The initial user input to start the conversation. 
If not provided, a user input needs to be provided manually using the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation.add_user_input">add_user_input()</a> method before the conversation can begin.`,name:"text"},{anchor:"transformers.Conversation.conversation_id",description:`<strong>conversation_id</strong> (<code>uuid.UUID</code>, <em>optional</em>) &#x2014; Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the conversation.`,name:"conversation_id"},{anchor:"transformers.Conversation.past_user_inputs",description:`<strong>past_user_inputs</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Eventual past history of the conversation of the user. You don&#x2019;t need to pass it manually if you use the pipeline interactively but if you want to recreate history you need to set both <code>past_user_inputs</code> and <code>generated_responses</code> with equal length lists of strings`,name:"past_user_inputs"},{anchor:"transformers.Conversation.generated_responses",description:`<strong>generated_responses</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Eventual past history of the conversation of the model. You don&#x2019;t need to pass it manually if you use the pipeline interactively but if you want to recreate history you need to set both <code>past_user_inputs</code> and <code>generated_responses</code> with equal length lists of strings`,name:"generated_responses"}]}}),ao=new z({props:{code:`conversation = Conversation("Going to the movies tonight - any suggestions?") # Steps usually performed by the model when generating a response: # 1. Mark the user input as processed (moved to the history) conversation.mark_processed() # 2. 
Append a mode response conversation.append_response("The Big lebowski.") conversation.add_user_input("Is it good?")`,highlighted:`conversation = Conversation(<span class="hljs-string">&quot;Going to the movies tonight - any suggestions?&quot;</span>) <span class="hljs-comment"># Steps usually performed by the model when generating a response:</span> <span class="hljs-comment"># 1. Mark the user input as processed (moved to the history)</span> conversation.mark_processed() <span class="hljs-comment"># 2. Append a mode response</span> conversation.append_response(<span class="hljs-string">&quot;The Big lebowski.&quot;</span>) conversation.add_user_input(<span class="hljs-string">&quot;Is it good?&quot;</span>)`}}),io=new w({props:{name:"add_user_input",anchor:"transformers.Conversation.add_user_input",parameters:[{name:"text",val:": str"},{name:"overwrite",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L83",parametersDescription:[{anchor:"transformers.Conversation.add_user_input.text",description:"<strong>text</strong> (<code>str</code>) &#x2014; The user input for the next conversation round.",name:"text"},{anchor:"transformers.Conversation.add_user_input.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not existing and unprocessed user input should be overwritten when this function is called.`,name:"overwrite"}]}}),po=new w({props:{name:"append_response",anchor:"transformers.Conversation.append_response",parameters:[{name:"response",val:": str"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L116",parametersDescription:[{anchor:"transformers.Conversation.append_response.response",description:"<strong>response</strong> (<code>str</code>) &#x2014; The model generated response.",name:"response"}]}}),co=new 
w({props:{name:"iter_texts",anchor:"transformers.Conversation.iter_texts",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L125"}}),mo=new w({props:{name:"mark_processed",anchor:"transformers.Conversation.mark_processed",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L107"}}),fo=new w({props:{name:"class transformers.ConversationalPipeline",anchor:"transformers.ConversationalPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L164",parametersDescription:[{anchor:"transformers.ConversationalPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ConversationalPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ConversationalPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ConversationalPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ConversationalPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ConversationalPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ConversationalPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> 
.`,name:"batch_size"},{anchor:"transformers.ConversationalPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ConversationalPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.ConversationalPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"},{anchor:"transformers.ConversationalPipeline.min_length_for_response",description:`<strong>min_length_for_response</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The minimum length (in number of tokens) for a response.`,name:"min_length_for_response"},{anchor:"transformers.ConversationalPipeline.minimum_tokens",description:`<strong>minimum_tokens</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of tokens to leave for a response.`,name:"minimum_tokens"}]}}),uo=new z({props:{code:`conversational_pipeline = pipeline("conversational") conversation_1 = Conversation("Going to the movies tonight - any suggestions?") conversation_2 = Conversation("What's the last book you have read?") conversational_pipeline([conversation_1, conversation_2]) conversation_1.add_user_input("Is it an action movie?") conversation_2.add_user_input("What is the genre of this book?") conversational_pipeline([conversation_1, 
conversation_2])`,highlighted:`conversational_pipeline = pipeline(<span class="hljs-string">&quot;conversational&quot;</span>) conversation_1 = Conversation(<span class="hljs-string">&quot;Going to the movies tonight - any suggestions?&quot;</span>) conversation_2 = Conversation(<span class="hljs-string">&quot;What&#x27;s the last book you have read?&quot;</span>) conversational_pipeline([conversation_1, conversation_2]) conversation_1.add_user_input(<span class="hljs-string">&quot;Is it an action movie?&quot;</span>) conversation_2.add_user_input(<span class="hljs-string">&quot;What is the genre of this book?&quot;</span>) conversational_pipeline([conversation_1, conversation_2])`}}),go=new w({props:{name:"__call__",anchor:"transformers.ConversationalPipeline.__call__",parameters:[{name:"conversations",val:": typing.Union[transformers.pipelines.conversational.Conversation, typing.List[transformers.pipelines.conversational.Conversation]]"},{name:"num_workers",val:" = 0"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L219",parametersDescription:[{anchor:"transformers.ConversationalPipeline.__call__.conversations",description:`<strong>conversations</strong> (a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation">Conversation</a> or a list of <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation">Conversation</a>) &#x2014; Conversations to generate responses for.`,name:"conversations"},{anchor:"transformers.ConversationalPipeline.__call__.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output. 
generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).`,name:"clean_up_tokenization_spaces"}],returnDescription:` <p>Conversation(s) with updated generated responses for those containing a new user input.</p> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation" >Conversation</a> or a list of <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation" >Conversation</a></p> `}}),_o=new y({}),vo=new w({props:{name:"class transformers.FeatureExtractionPipeline",anchor:"transformers.FeatureExtractionPipeline",parameters:[{name:"model",val:": typing.Union[ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')]"},{name:"tokenizer",val:": typing.Optional[transformers.tokenization_utils.PreTrainedTokenizer] = None"},{name:"feature_extractor",val:": typing.Optional[ForwardRef('SequenceFeatureExtractor')] = None"},{name:"modelcard",val:": typing.Optional[transformers.modelcard.ModelCard] = None"},{name:"framework",val:": typing.Optional[str] = None"},{name:"task",val:": str = ''"},{name:"args_parser",val:": ArgumentHandler = None"},{name:"device",val:": int = -1"},{name:"binary_output",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/feature_extraction.py#L7",parametersDescription:[{anchor:"transformers.FeatureExtractionPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.FeatureExtractionPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.FeatureExtractionPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.FeatureExtractionPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.FeatureExtractionPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.FeatureExtractionPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.FeatureExtractionPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"}]}}),ko=new w({props:{name:"__call__",anchor:"transformers.FeatureExtractionPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/feature_extraction.py#L69",parametersDescription:[{anchor:"transformers.FeatureExtractionPipeline.__call__.args",description:"<strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of texts) to get the features of.",name:"args"}],returnDescription:` <p>The features computed by the model.</p> `,returnType:` <p>A nested list of <code>float</code></p> `}}),Po=new y({}),To=new w({props:{name:"class transformers.FillMaskPipeline",anchor:"transformers.FillMaskPipeline",parameters:[{name:"model",val:": typing.Union[ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')]"},{name:"tokenizer",val:": 
typing.Optional[transformers.tokenization_utils.PreTrainedTokenizer] = None"},{name:"feature_extractor",val:": typing.Optional[ForwardRef('SequenceFeatureExtractor')] = None"},{name:"modelcard",val:": typing.Optional[transformers.modelcard.ModelCard] = None"},{name:"framework",val:": typing.Optional[str] = None"},{name:"task",val:": str = ''"},{name:"args_parser",val:": ArgumentHandler = None"},{name:"device",val:": int = -1"},{name:"binary_output",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/fill_mask.py#L33",parametersDescription:[{anchor:"transformers.FillMaskPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.FillMaskPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.FillMaskPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.FillMaskPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.FillMaskPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.FillMaskPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.FillMaskPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> 
.`,name:"batch_size"},{anchor:"transformers.FillMaskPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.FillMaskPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.FillMaskPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"},{anchor:"transformers.FillMaskPipeline.top_k",description:`<strong>top_k</strong> (<code>int</code>, defaults to 5) &#x2014; The number of predictions to return.`,name:"top_k"},{anchor:"transformers.FillMaskPipeline.targets",description:`<strong>targets</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. 
If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower).`,name:"targets"}]}}),kn=new kA({props:{$$slots:{default:[EA]},$$scope:{ctx:vr}}}),$o=new w({props:{name:"__call__",anchor:"transformers.FillMaskPipeline.__call__",parameters:[{name:"inputs",val:""},{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/fill_mask.py#L204",parametersDescription:[{anchor:"transformers.FillMaskPipeline.__call__.args",description:`<strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of prompts) with masked tokens.`,name:"args"},{anchor:"transformers.FillMaskPipeline.__call__.targets",description:`<strong>targets</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. 
If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower).`,name:"targets"},{anchor:"transformers.FillMaskPipeline.__call__.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>) &#x2014; When passed, overrides the number of predictions to return.`,name:"top_k"}],returnDescription:` <p>Each result comes as list of dictionaries with the following keys:</p> <ul> <li><strong>sequence</strong> (<code>str</code>) \u2014 The corresponding input with the mask token prediction.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The corresponding probability.</li> <li><strong>token</strong> (<code>int</code>) \u2014 The predicted token id (to replace the masked one).</li> <li><strong>token</strong> (<code>str</code>) \u2014 The predicted token (to replace the masked one).</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),Eo=new y({}),qo=new w({props:{name:"class transformers.ImageClassificationPipeline",anchor:"transformers.ImageClassificationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/image_classification.py#L31",parametersDescription:[{anchor:"transformers.ImageClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ImageClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ImageClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ImageClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ImageClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ImageClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ImageClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.ImageClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ImageClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.ImageClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),Do=new w({props:{name:"__call__",anchor:"transformers.ImageClassificationPipeline.__call__",parameters:[{name:"images",val:": typing.Union[str, typing.List[str], ForwardRef('Image.Image'), typing.List[ForwardRef('Image.Image')]]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/image_classification.py#L58",parametersDescription:[{anchor:"transformers.ImageClassificationPipeline.__call__.images",description:`<strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a http link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images.`,name:"images"},{anchor:"transformers.ImageClassificationPipeline.__call__.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels.`,name:"top_k"}],returnDescription:` <p>A dictionary or a list of dictionaries containing result. 
If the input is a single image, will return a dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to the images.</p> <p>The dictionaries contain the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The label identified by the model.</li> <li><strong>score</strong> (<code>int</code>) \u2014 The score attributed by the model for that label.</li> </ul> `}}),Io=new y({}),jo=new w({props:{name:"class transformers.ImageSegmentationPipeline",anchor:"transformers.ImageSegmentationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/image_segmentation.py#L34",parametersDescription:[{anchor:"transformers.ImageSegmentationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ImageSegmentationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ImageSegmentationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ImageSegmentationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ImageSegmentationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ImageSegmentationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ImageSegmentationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> 
.`,name:"batch_size"},{anchor:"transformers.ImageSegmentationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ImageSegmentationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.ImageSegmentationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),Lo=new w({props:{name:"__call__",anchor:"transformers.ImageSegmentationPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/image_segmentation.py#L69",parametersDescription:[{anchor:"transformers.ImageSegmentationPipeline.__call__.images",description:`<strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing an HTTP(S) link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. 
Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images.`,name:"images"},{anchor:"transformers.ImageSegmentationPipeline.__call__.threshold",description:`<strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The probability necessary to make a prediction.`,name:"threshold"},{anchor:"transformers.ImageSegmentationPipeline.__call__.mask_threshold",description:`<strong>mask_threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; Threshold to use when turning the predicted masks into binary values.`,name:"mask_threshold"}],returnDescription:` <p>A dictionary or a list of dictionaries containing the result. If the input is a single image, will return a list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries corresponding to each image.</p> <p>The dictionaries contain the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The class label identified by the model.</li> <li><strong>mask</strong> (<code>PIL.Image</code>) \u2014 Pil Image with size (heigth, width) of the original image. Pixel values in the image are in the range 0-255. 
0 means the pixel is <em>not</em> part of the <em>label</em>, 255 means it definitely is.</li> <li><strong>score</strong> (<em>optional</em> <code>float</code>) \u2014 Optionally, when the model is capable of estimating a confidence of the \u201Cobject\u201D described by the label and the mask.</li> </ul> `}}),Uo=new y({}),Go=new w({props:{name:"class transformers.TokenClassificationPipeline",anchor:"transformers.TokenClassificationPipeline",parameters:[{name:"args_parser",val:" = <transformers.pipelines.token_classification.TokenClassificationArgumentHandler object at 0x7f45b52f37c0>"},{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L86",parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TokenClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TokenClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TokenClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TokenClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TokenClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TokenClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> 
.`,name:"batch_size"},{anchor:"transformers.TokenClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TokenClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.TokenClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"},{anchor:"transformers.TokenClassificationPipeline.ignore_labels",description:`<strong>ignore_labels</strong> (<code>List[str]</code>, defaults to <code>[&quot;O&quot;]</code>) &#x2014; A list of labels to ignore.`,name:"ignore_labels"},{anchor:"transformers.TokenClassificationPipeline.grouped_entities",description:`<strong>grouped_entities</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; DEPRECATED, use <code>aggregation_strategy</code> instead. 
Whether or not to group the tokens corresponding to the same entity together in the predictions or not.`,name:"grouped_entities"},{anchor:"transformers.TokenClassificationPipeline.aggregation_strategy",description:`<strong>aggregation_strategy</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;none&quot;</code>) &#x2014; The strategy to fuse (or not) tokens based on the model prediction.</p> <ul> <li>&#x201C;none&#x201D; : Will simply not do any aggregation and simply return raw results from the model</li> <li>&#x201C;simple&#x201D; : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C, I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{&#x201C;word&#x201D;: ABC, &#x201C;entity&#x201D;: &#x201C;TAG&#x201D;}, {&#x201C;word&#x201D;: &#x201C;D&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}, {&#x201C;word&#x201D;: &#x201C;E&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}] Notice that two consecutive B tags will end up as different entities. On word based languages, we might end up splitting words undesirably : Imagine Microsoft being tagged as [{&#x201C;word&#x201D;: &#x201C;Micro&#x201D;, &#x201C;entity&#x201D;: &#x201C;ENTERPRISE&#x201D;}, {&#x201C;word&#x201D;: &#x201C;soft&#x201D;, &#x201C;entity&#x201D;: &#x201C;NAME&#x201D;}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages that support that meaning, which is basically tokens separated by a space). These mitigations will only work on real words, &#x201C;New york&#x201D; might still be tagged with two different entities.</li> <li>&#x201C;first&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. 
Words will simply use the tag of the first token of the word when there is ambiguity.</li> <li>&#x201C;average&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. scores will be averaged first across tokens, and then the maximum label is applied.</li> <li>&#x201C;max&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. Word entity will simply be the token with the maximum score.</li> </ul>`,name:"aggregation_strategy"}]}}),Ro=new w({props:{name:"aggregate_words",anchor:"transformers.TokenClassificationPipeline.aggregate_words",parameters:[{name:"entities",val:": typing.List[dict]"},{name:"aggregation_strategy",val:": AggregationStrategy"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L366"}}),Ho=new w({props:{name:"gather_pre_entities",anchor:"transformers.TokenClassificationPipeline.gather_pre_entities",parameters:[{name:"sentence",val:": str"},{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"offset_mapping",val:": typing.Union[typing.List[typing.Tuple[int, int]], NoneType]"},{name:"special_tokens_mask",val:": ndarray"},{name:"aggregation_strategy",val:": AggregationStrategy"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L252"}}),Wo=new w({props:{name:"group_entities",anchor:"transformers.TokenClassificationPipeline.group_entities",parameters:[{name:"entities",val:": typing.List[dict]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L428",parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.group_entities.entities",description:"<strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the 
pipeline.",name:"entities"}]}}),Qo=new w({props:{name:"group_sub_entities",anchor:"transformers.TokenClassificationPipeline.group_sub_entities",parameters:[{name:"entities",val:": typing.List[dict]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L393",parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.group_sub_entities.entities",description:"<strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.",name:"entities"}]}}),Vo=new y({}),Bo=new w({props:{name:"class transformers.ObjectDetectionPipeline",anchor:"transformers.ObjectDetectionPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/object_detection.py#L25",parametersDescription:[{anchor:"transformers.ObjectDetectionPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ObjectDetectionPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ObjectDetectionPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ObjectDetectionPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ObjectDetectionPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ObjectDetectionPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ObjectDetectionPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> 
.`,name:"batch_size"},{anchor:"transformers.ObjectDetectionPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ObjectDetectionPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.ObjectDetectionPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),Ko=new w({props:{name:"__call__",anchor:"transformers.ObjectDetectionPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/object_detection.py#L51",parametersDescription:[{anchor:"transformers.ObjectDetectionPipeline.__call__.images",description:`<strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing an HTTP(S) link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. 
Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images.`,name:"images"},{anchor:"transformers.ObjectDetectionPipeline.__call__.threshold",description:`<strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The probability necessary to make a prediction.`,name:"threshold"}],returnDescription:` <p>A list of dictionaries or a list of list of dictionaries containing the result. If the input is a single image, will return a list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries corresponding to each image.</p> <p>The dictionaries contain the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The class label identified by the model.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The score attributed by the model for that label.</li> <li><strong>box</strong> (<code>List[Dict[str, int]]</code>) \u2014 The bounding box of detected object in image\u2019s original size.</li> </ul> `}}),Jo=new y({}),es=new w({props:{name:"class transformers.QuestionAnsweringPipeline",anchor:"transformers.QuestionAnsweringPipeline",parameters:[{name:"model",val:": typing.Union[ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')]"},{name:"tokenizer",val:": PreTrainedTokenizer"},{name:"modelcard",val:": typing.Optional[transformers.modelcard.ModelCard] = None"},{name:"framework",val:": typing.Optional[str] = None"},{name:"device",val:": int = -1"},{name:"task",val:": str = ''"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L102",parametersDescription:[{anchor:"transformers.QuestionAnsweringPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a 
href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.QuestionAnsweringPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.QuestionAnsweringPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.QuestionAnsweringPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.QuestionAnsweringPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.QuestionAnsweringPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.QuestionAnsweringPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.QuestionAnsweringPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.QuestionAnsweringPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.QuestionAnsweringPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),rs=new w({props:{name:"__call__",anchor:"transformers.QuestionAnsweringPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L205",parametersDescription:[{anchor:"transformers.QuestionAnsweringPipeline.__call__.args",description:`<strong>args</strong> (<code>SquadExample</code>or a list of <code>SquadExample</code> &#x2014; One or several <code>SquadExample</code>containing the question and context.`,name:"args"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.X",description:`<strong>X</strong> (<code>SquadExample</code>or a list of <code>SquadExample</code> <em>optional</em>) &#x2014; One or several <code>SquadExample</code>containing the question and context (will be treated the same way as if passed as the first positional argument).`,name:"X"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.data",description:`<strong>data</strong> (<code>SquadExample</code>or a list of <code>SquadExample</code> <em>optional</em>) &#x2014; One or several <code>SquadExample</code>containing the question and context (will be treated the same way as if passed as the first positional argument).`,name:"data"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.question",description:`<strong>question</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several question(s) (must be used in conjunction with the <code>context</code> 
argument).`,name:"question"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.context",description:`<strong>context</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several context(s) associated with the question(s) (must be used in conjunction with the <code>question</code> argument).`,name:"context"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.topk",description:`<strong>topk</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of answers to return (will be chosen by order of likelihood). Note that we return less than topk answers if there are not enough options available within the context.`,name:"topk"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.doc_stride",description:`<strong>doc_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; If the context is too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap.`,name:"doc_stride"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.max_answer_len",description:`<strong>max_answer_len</strong> (<code>int</code>, <em>optional</em>, defaults to 15) &#x2014; The maximum length of predicted answers (e.g., only answers with a shorter length are considered).`,name:"max_answer_len"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.max_seq_len",description:`<strong>max_seq_len</strong> (<code>int</code>, <em>optional</em>, defaults to 384) &#x2014; The maximum length of the total sentence (context + question) after tokenization. The context will be split in several chunks (using <code>doc_stride</code>) if needed.`,name:"max_seq_len"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.max_question_len",description:`<strong>max_question_len</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; The maximum length of the question after tokenization. 
It will be truncated if needed.`,name:"max_question_len"},{anchor:"transformers.QuestionAnsweringPipeline.__call__.handle_impossible_answer",description:`<strong>handle_impossible_answer</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not we accept impossible as an answer.`,name:"handle_impossible_answer"}],returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>score</strong> (<code>float</code>) \u2014 The probability associated to the answer.</li> <li><strong>start</strong> (<code>int</code>) \u2014 The character start index of the answer (in the tokenized version of the input).</li> <li><strong>end</strong> (<code>int</code>) \u2014 The character end index of the answer (in the tokenized version of the input).</li> <li><strong>answer</strong> (<code>str</code>) \u2014 The answer to the question.</li> </ul> `,returnType:` <p>A <code>dict</code> or a list of <code>dict</code></p> `}}),os=new w({props:{name:"create_sample",anchor:"transformers.QuestionAnsweringPipeline.create_sample",parameters:[{name:"question",val:": typing.Union[str, typing.List[str]]"},{name:"context",val:": typing.Union[str, typing.List[str]]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L143",parametersDescription:[{anchor:"transformers.QuestionAnsweringPipeline.create_sample.question",description:"<strong>question</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The question(s) asked.",name:"question"},{anchor:"transformers.QuestionAnsweringPipeline.create_sample.context",description:"<strong>context</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The context(s) in which we will look for the answer.",name:"context"}],returnDescription:` <p>The corresponding <code>SquadExample</code>grouping question and context.</p> `}}),ss=new 
w({props:{name:"decode",anchor:"transformers.QuestionAnsweringPipeline.decode",parameters:[{name:"start",val:": ndarray"},{name:"end",val:": ndarray"},{name:"topk",val:": int"},{name:"max_answer_len",val:": int"},{name:"undesired_tokens",val:": ndarray"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L481",parametersDescription:[{anchor:"transformers.QuestionAnsweringPipeline.decode.start",description:"<strong>start</strong> (<code>np.ndarray</code>) &#x2014; Individual start probabilities for each token.",name:"start"},{anchor:"transformers.QuestionAnsweringPipeline.decode.end",description:"<strong>end</strong> (<code>np.ndarray</code>) &#x2014; Individual end probabilities for each token.",name:"end"},{anchor:"transformers.QuestionAnsweringPipeline.decode.topk",description:"<strong>topk</strong> (<code>int</code>) &#x2014; Indicates how many possible answer span(s) to extract from the model output.",name:"topk"},{anchor:"transformers.QuestionAnsweringPipeline.decode.max_answer_len",description:"<strong>max_answer_len</strong> (<code>int</code>) &#x2014; Maximum size of the answer to extract from the model&#x2019;s output.",name:"max_answer_len"},{anchor:"transformers.QuestionAnsweringPipeline.decode.undesired_tokens",description:"<strong>undesired_tokens</strong> (<code>np.ndarray</code>) &#x2014; Mask determining tokens that can be part of the answer",name:"undesired_tokens"}]}}),is=new w({props:{name:"span_to_answer",anchor:"transformers.QuestionAnsweringPipeline.span_to_answer",parameters:[{name:"text",val:": str"},{name:"start",val:": int"},{name:"end",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L530",parametersDescription:[{anchor:"transformers.QuestionAnsweringPipeline.span_to_answer.text",description:"<strong>text</strong> (<code>str</code>) &#x2014; The actual context to extract the answer 
from.",name:"text"},{anchor:"transformers.QuestionAnsweringPipeline.span_to_answer.start",description:"<strong>start</strong> (<code>int</code>) &#x2014; The answer starting token index.",name:"start"},{anchor:"transformers.QuestionAnsweringPipeline.span_to_answer.end",description:"<strong>end</strong> (<code>int</code>) &#x2014; The answer end token index.",name:"end"}],returnDescription:` <p>str, \u2018start\u2019: int, \u2018end\u2019: int}\`</p> `,returnType:` <p>Dictionary like \`{\u2018answer\u2019</p> `}}),ls=new y({}),ps=new w({props:{name:"class transformers.SummarizationPipeline",anchor:"transformers.SummarizationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L186",parametersDescription:[{anchor:"transformers.SummarizationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.SummarizationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.SummarizationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.SummarizationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.SummarizationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.SummarizationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.SummarizationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> 
.`,name:"batch_size"},{anchor:"transformers.SummarizationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.SummarizationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.SummarizationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),ds=new z({props:{code:`# use bart in pytorch summarizer = pipeline("summarization") summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20) # use t5 in tf summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf") summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)`,highlighted:`<span class="hljs-comment"># use bart in pytorch</span> summarizer = pipeline(<span class="hljs-string">&quot;summarization&quot;</span>) summarizer(<span class="hljs-string">&quot;An apple a day, keeps the doctor away&quot;</span>, min_length=<span class="hljs-number">5</span>, max_length=<span class="hljs-number">20</span>) <span class="hljs-comment"># use t5 in tf</span> summarizer = pipeline(<span class="hljs-string">&quot;summarization&quot;</span>, model=<span class="hljs-string">&quot;t5-base&quot;</span>, tokenizer=<span class="hljs-string">&quot;t5-base&quot;</span>, framework=<span 
class="hljs-string">&quot;tf&quot;</span>) summarizer(<span class="hljs-string">&quot;An apple a day, keeps the doctor away&quot;</span>, min_length=<span class="hljs-number">5</span>, max_length=<span class="hljs-number">20</span>)`}}),ms=new w({props:{name:"__call__",anchor:"transformers.SummarizationPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L212",parametersDescription:[{anchor:"transformers.SummarizationPipeline.__call__.documents",description:`<strong>documents</strong> (<em>str</em> or <code>List[str]</code>) &#x2014; One or several articles (or one list of articles) to summarize.`,name:"documents"},{anchor:"transformers.SummarizationPipeline.__call__.return_text",description:`<strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs`,name:"return_text"},{anchor:"transformers.SummarizationPipeline.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.`,name:"return_tensors"},{anchor:"transformers.SummarizationPipeline.__call__.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output. 
generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).`,name:"clean_up_tokenization_spaces"}],returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>summary_text</strong> (<code>str</code>, present when <code>return_text=True</code>) \u2014 The summary of the corresponding input.</li> <li><strong>summary_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) \u2014 The token ids of the summary.</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),fs=new y({}),hs=new w({props:{name:"class transformers.TableQuestionAnsweringPipeline",anchor:"transformers.TableQuestionAnsweringPipeline",parameters:[{name:"args_parser",val:" = <transformers.pipelines.table_question_answering.TableQuestionAnsweringArgumentHandler object at 0x7f45b52d1cd0>"},{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/table_question_answering.py#L83",parametersDescription:[{anchor:"transformers.TableQuestionAnsweringPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TableQuestionAnsweringPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TableQuestionAnsweringPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TableQuestionAnsweringPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TableQuestionAnsweringPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TableQuestionAnsweringPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TableQuestionAnsweringPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.TableQuestionAnsweringPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TableQuestionAnsweringPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.TableQuestionAnsweringPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),vs=new w({props:{name:"__call__",anchor:"transformers.TableQuestionAnsweringPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/table_question_answering.py#L244",parametersDescription:[{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.table",description:`<strong>table</strong> (<code>pd.DataFrame</code> or <code>Dict</code>) &#x2014; Pandas DataFrame or dictionary that will be converted to a DataFrame containing all the table values. See above for an example of dictionary.`,name:"table"},{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.query",description:`<strong>query</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Query or list of queries that will be sent to the model alongside the table.`,name:"query"},{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.sequential",description:`<strong>sequential</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to do inference sequentially or as a batch. 
Batching is faster, but models like SQA require the inference to be done sequentially to extract relations within sequences, given their conversational nature.`,name:"sequential"},{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.TableQuestionAnsweringPipeline.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <code>TapasTruncationStrategy</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;drop_rows_to_fit&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will truncate row by row, removing rows from the table.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"}],returnDescription:` <p>Each result is a dictionary with the following keys:</p> <ul> <li><strong>answer</strong> (<code>str</code>) \u2014 The answer of the query given the table. If there is an aggregator, the answer will be preceded by <code>AGGREGATOR &gt;</code>.</li> <li><strong>coordinates</strong> (<code>List[Tuple[int, int]]</code>) \u2014 Coordinates of the cells of the answers.</li> <li><strong>cells</strong> (<code>List[str]</code>) \u2014 List of strings made up of the answer cell values.</li> <li><strong>aggregator</strong> (<code>str</code>) \u2014 If the model has an aggregator, this returns the aggregator.</li> </ul> `,returnType:` <p>A dictionary or a list of dictionaries containing results</p> `}}),bs=new z({props:{code:`data = { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }`,highlighted:`data = { <span class="hljs-string">&quot;actors&quot;</span>: [<span class="hljs-string">&quot;brad pitt&quot;</span>, <span class="hljs-string">&quot;leonardo di caprio&quot;</span>, <span class="hljs-string">&quot;george clooney&quot;</span>], <span class="hljs-string">&quot;age&quot;</span>: [<span class="hljs-string">&quot;56&quot;</span>, <span class="hljs-string">&quot;45&quot;</span>, <span class="hljs-string">&quot;59&quot;</span>], <span class="hljs-string">&quot;number of movies&quot;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>], <span class="hljs-string">&quot;date of birth&quot;</span>: [<span 
class="hljs-string">&quot;7 february 1967&quot;</span>, <span class="hljs-string">&quot;10 june 1996&quot;</span>, <span class="hljs-string">&quot;28 november 1967&quot;</span>], }`}}),ks=new z({props:{code:`import pandas as pd table = pd.DataFrame.from_dict(data)`,highlighted:`<span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd table = pd.DataFrame.from_dict(data)`}}),Ps=new y({}),Ts=new w({props:{name:"class transformers.TextClassificationPipeline",anchor:"transformers.TextClassificationPipeline",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text_classification.py#L47",parametersDescription:[{anchor:"transformers.TextClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TextClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TextClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TextClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TextClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TextClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TextClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> 
.`,name:"batch_size"},{anchor:"transformers.TextClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TextClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.TextClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"},{anchor:"transformers.TextClassificationPipeline.return_all_scores",description:`<strong>return_all_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to return all prediction scores or just the one of the predicted class.`,name:"return_all_scores"},{anchor:"transformers.TextClassificationPipeline.function_to_apply",description:`<strong>function_to_apply</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;default&quot;</code>) &#x2014; The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:</p> <ul> <li><code>&quot;default&quot;</code>: if the model has a single label, will apply the sigmoid function on the output. 
If the model has several labels, will apply the softmax function on the output.</li> <li><code>&quot;sigmoid&quot;</code>: Applies the sigmoid function on the output.</li> <li><code>&quot;softmax&quot;</code>: Applies the softmax function on the output.</li> <li><code>&quot;none&quot;</code>: Does not apply any function on the output.</li> </ul>`,name:"function_to_apply"}]}}),Es=new w({props:{name:"__call__",anchor:"transformers.TextClassificationPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text_classification.py#L92",parametersDescription:[{anchor:"transformers.TextClassificationPipeline.__call__.args",description:`<strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of prompts) to classify.`,name:"args"},{anchor:"transformers.TextClassificationPipeline.__call__.return_all_scores",description:`<strong>return_all_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to return scores for all labels.`,name:"return_all_scores"},{anchor:"transformers.TextClassificationPipeline.__call__.function_to_apply",description:`<strong>function_to_apply</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;default&quot;</code>) &#x2014; The function to apply to the model outputs in order to retrieve the scores. 
Accepts four different values:</p> <p>If this argument is not specified, then it will apply the following functions according to the number of labels:</p> <ul> <li>If the model has a single label, will apply the sigmoid function on the output.</li> <li>If the model has several labels, will apply the softmax function on the output.</li> </ul> <p>Possible values are:</p> <ul> <li><code>&quot;sigmoid&quot;</code>: Applies the sigmoid function on the output.</li> <li><code>&quot;softmax&quot;</code>: Applies the softmax function on the output.</li> <li><code>&quot;none&quot;</code>: Does not apply any function on the output.</li> </ul>`,name:"function_to_apply"}],returnDescription:` <p>Each result comes as list of dictionaries with the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The label predicted.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The corresponding probability.</li> </ul> <p>If <code>self.return_all_scores=True</code>, one such dictionary is returned per label.</p> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),qs=new y({}),As=new w({props:{name:"class transformers.TextGenerationPipeline",anchor:"transformers.TextGenerationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text_generation.py#L20",parametersDescription:[{anchor:"transformers.TextGenerationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TextGenerationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TextGenerationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TextGenerationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TextGenerationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TextGenerationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TextGenerationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.TextGenerationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TextGenerationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.TextGenerationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),Is=new w({props:{name:"__call__",anchor:"transformers.TextGenerationPipeline.__call__",parameters:[{name:"text_inputs",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text_generation.py#L136",parametersDescription:[{anchor:"transformers.TextGenerationPipeline.__call__.args",description:`<strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several prompts (or one list of prompts) to complete.`,name:"args"},{anchor:"transformers.TextGenerationPipeline.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.`,name:"return_tensors"},{anchor:"transformers.TextGenerationPipeline.__call__.return_text",description:`<strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs.`,name:"return_text"},{anchor:"transformers.TextGenerationPipeline.__call__.return_full_text",description:`<strong>return_full_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>False</code> only added text is returned, otherwise the full text is returned Only meaningful if <em>return_text</em> is set to 
True.`,name:"return_full_text"},{anchor:"transformers.TextGenerationPipeline.__call__.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.TextGenerationPipeline.__call__.prefix",description:`<strong>prefix</strong> (<code>str</code>, <em>optional</em>) &#x2014; Prefix added to prompt.`,name:"prefix"},{anchor:"transformers.TextGenerationPipeline.__call__.handle_long_generation",description:`<strong>handle_long_generation</strong> (<code>str</code>, <em>optional</em>) &#x2014; By default, this pipelines does not handle long generation (ones that exceed in one form or the other the model maximum length). There is no perfect way to adress this (more info :<a href="https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227" rel="nofollow">https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227</a>). 
This provides common strategies to work around that problem depending on your use case.</p> <ul> <li><code>None</code> : default strategy where nothing in particular happens</li> <li><code>&quot;hole&quot;</code>: Truncates left of input, and leaves a gap wide enough to let generation happen (might truncate a lot of the prompt and not suitable when generation exceed the model capacity)</li> </ul>`,name:"handle_long_generation"}],returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>generated_text</strong> (<code>str</code>, present when <code>return_text=True</code>) \u2014 The generated text.</li> <li><strong>generated_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) \u2014 The token ids of the generated text.</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),js=new y({}),Ss=new w({props:{name:"class transformers.Text2TextGenerationPipeline",anchor:"transformers.Text2TextGenerationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L26",parametersDescription:[{anchor:"transformers.Text2TextGenerationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.Text2TextGenerationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.Text2TextGenerationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.Text2TextGenerationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.Text2TextGenerationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.Text2TextGenerationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.Text2TextGenerationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.Text2TextGenerationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.Text2TextGenerationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.Text2TextGenerationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),Ls=new z({props:{code:`text2text_generator = pipeline("text2text-generation") text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")`,highlighted:`text2text_generator = pipeline(<span class="hljs-string">&quot;text2text-generation&quot;</span>) text2text_generator(<span class="hljs-string">&quot;question: What is 42 ? context: 42 is the answer to life, the universe and everything&quot;</span>)`}}),Us=new w({props:{name:"__call__",anchor:"transformers.Text2TextGenerationPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L109",parametersDescription:[{anchor:"transformers.Text2TextGenerationPipeline.__call__.args",description:`<strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Input text for the encoder.`,name:"args"},{anchor:"transformers.Text2TextGenerationPipeline.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.`,name:"return_tensors"},{anchor:"transformers.Text2TextGenerationPipeline.__call__.return_text",description:`<strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the 
outputs.`,name:"return_text"},{anchor:"transformers.Text2TextGenerationPipeline.__call__.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.Text2TextGenerationPipeline.__call__.truncation",description:`<strong>truncation</strong> (<code>TruncationStrategy</code>, <em>optional</em>, defaults to <code>TruncationStrategy.DO_NOT_TRUNCATE</code>) &#x2014; The truncation strategy for the tokenization within the pipeline. <code>TruncationStrategy.DO_NOT_TRUNCATE</code> (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model&#x2019;s max_length instead of throwing an error down the line. generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).`,name:"truncation"}],returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>generated_text</strong> (<code>str</code>, present when <code>return_text=True</code>) \u2014 The generated text.</li> <li><strong>generated_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) \u2014 The token ids of the generated text.</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),Gs=new w({props:{name:"check_inputs",anchor:"transformers.Text2TextGenerationPipeline.check_inputs",parameters:[{name:"input_length",val:": int"},{name:"min_length",val:": int"},{name:"max_length",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L82"}}),Ns=new y({}),Os=new w({props:{name:"class 
transformers.TokenClassificationPipeline",anchor:"transformers.TokenClassificationPipeline",parameters:[{name:"args_parser",val:" = <transformers.pipelines.token_classification.TokenClassificationArgumentHandler object at 0x7f45b52f37c0>"},{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L86",parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TokenClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TokenClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TokenClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TokenClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TokenClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TokenClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> 
.`,name:"batch_size"},{anchor:"transformers.TokenClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TokenClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.TokenClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"},{anchor:"transformers.TokenClassificationPipeline.ignore_labels",description:`<strong>ignore_labels</strong> (<code>List[str]</code>, defaults to <code>[&quot;O&quot;]</code>) &#x2014; A list of labels to ignore.`,name:"ignore_labels"},{anchor:"transformers.TokenClassificationPipeline.grouped_entities",description:`<strong>grouped_entities</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; DEPRECATED, use <code>aggregation_strategy</code> instead. 
Whether or not to group the tokens corresponding to the same entity together in the predictions or not.`,name:"grouped_entities"},{anchor:"transformers.TokenClassificationPipeline.aggregation_strategy",description:`<strong>aggregation_strategy</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;none&quot;</code>) &#x2014; The strategy to fuse (or not) tokens based on the model prediction.</p> <ul> <li>&#x201C;none&#x201D; : Will simply not do any aggregation and simply return raw results from the model</li> <li>&#x201C;simple&#x201D; : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C, I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{&#x201C;word&#x201D;: ABC, &#x201C;entity&#x201D;: &#x201C;TAG&#x201D;}, {&#x201C;word&#x201D;: &#x201C;D&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}, {&#x201C;word&#x201D;: &#x201C;E&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}] Notice that two consecutive B tags will end up as different entities. On word based languages, we might end up splitting words undesirably : Imagine Microsoft being tagged as [{&#x201C;word&#x201D;: &#x201C;Micro&#x201D;, &#x201C;entity&#x201D;: &#x201C;ENTERPRISE&#x201D;}, {&#x201C;word&#x201D;: &#x201C;soft&#x201D;, &#x201C;entity&#x201D;: &#x201C;NAME&#x201D;}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages that support that meaning, which is basically tokens separated by a space). These mitigations will only work on real words, &#x201C;New york&#x201D; might still be tagged with two different entities.</li> <li>&#x201C;first&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. 
Words will simply use the tag of the first token of the word when there is ambiguity.</li> <li>&#x201C;average&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. scores will be averaged first across tokens, and then the maximum label is applied.</li> <li>&#x201C;max&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. Word entity will simply be the token with the maximum score.</li> </ul>`,name:"aggregation_strategy"}]}}),Ws=new w({props:{name:"__call__",anchor:"transformers.TokenClassificationPipeline.__call__",parameters:[{name:"inputs",val:": typing.Union[str, typing.List[str]]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L160",parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.__call__.inputs",description:`<strong>inputs</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of texts) for token classification.`,name:"inputs"}],returnDescription:` <p>Each result comes as a list of dictionaries (one for each token in the corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with the following keys:</p> <ul> <li><strong>word</strong> (<code>str</code>) \u2014 The token/word classified.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The corresponding probability for <code>entity</code>.</li> <li><strong>entity</strong> (<code>str</code>) \u2014 The entity predicted for that token/word (it is named <em>entity_group</em> when <em>aggregation_strategy</em> is not <code>"none"</code>.</li> <li><strong>index</strong> (<code>int</code>, only present when <code>aggregation_strategy="none"</code>) \u2014 The index of the corresponding token in the sentence.</li> 
<li><strong>start</strong> (<code>int</code>, <em>optional</em>) \u2014 The index of the start of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer</li> <li><strong>end</strong> (<code>int</code>, <em>optional</em>) \u2014 The index of the end of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),Qs=new w({props:{name:"aggregate_words",anchor:"transformers.TokenClassificationPipeline.aggregate_words",parameters:[{name:"entities",val:": typing.List[dict]"},{name:"aggregation_strategy",val:": AggregationStrategy"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L366"}}),Vs=new w({props:{name:"gather_pre_entities",anchor:"transformers.TokenClassificationPipeline.gather_pre_entities",parameters:[{name:"sentence",val:": str"},{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"offset_mapping",val:": typing.Union[typing.List[typing.Tuple[int, int]], NoneType]"},{name:"special_tokens_mask",val:": ndarray"},{name:"aggregation_strategy",val:": AggregationStrategy"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L252"}}),Bs=new w({props:{name:"group_entities",anchor:"transformers.TokenClassificationPipeline.group_entities",parameters:[{name:"entities",val:": typing.List[dict]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L428",parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.group_entities.entities",description:"<strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.",name:"entities"}]}}),Zs=new 
w({props:{name:"group_sub_entities",anchor:"transformers.TokenClassificationPipeline.group_sub_entities",parameters:[{name:"entities",val:": typing.List[dict]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L393",parametersDescription:[{anchor:"transformers.TokenClassificationPipeline.group_sub_entities.entities",description:"<strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.",name:"entities"}]}}),Ys=new y({}),Xs=new w({props:{name:"class transformers.TranslationPipeline",anchor:"transformers.TranslationPipeline",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L253",parametersDescription:[{anchor:"transformers.TranslationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.TranslationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.TranslationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.TranslationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.TranslationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.TranslationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.TranslationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> 
.`,name:"batch_size"},{anchor:"transformers.TranslationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.TranslationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.TranslationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),ea=new z({props:{code:`en_fr_translator = pipeline("translation_en_to_fr") en_fr_translator("How old are you?")`,highlighted:`en_fr_translator = pipeline(<span class="hljs-string">&quot;translation_en_to_fr&quot;</span>) en_fr_translator(<span class="hljs-string">&quot;How old are you?&quot;</span>)`}}),ta=new w({props:{name:"__call__",anchor:"transformers.TranslationPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L305",parametersDescription:[{anchor:"transformers.TranslationPipeline.__call__.args",description:`<strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Texts to be translated.`,name:"args"},{anchor:"transformers.TranslationPipeline.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors 
of predictions (as token indices) in the outputs.`,name:"return_tensors"},{anchor:"transformers.TranslationPipeline.__call__.return_text",description:`<strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs.`,name:"return_text"},{anchor:"transformers.TranslationPipeline.__call__.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.TranslationPipeline.__call__.src_lang",description:`<strong>src_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the input. Might be required for multilingual models. Will not have any effect for single pair translation models`,name:"src_lang"},{anchor:"transformers.TranslationPipeline.__call__.tgt_lang",description:`<strong>tgt_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the desired output. Might be required for multilingual models. 
Will not have any effect for single pair translation models generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).`,name:"tgt_lang"}],returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>translation_text</strong> (<code>str</code>, present when <code>return_text=True</code>) \u2014 The translation.</li> <li><strong>translation_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) \u2014 The token ids of the translation.</li> </ul> `,returnType:` <p>A list or a list of list of <code>dict</code></p> `}}),na=new y({}),ra=new w({props:{name:"class transformers.ZeroShotClassificationPipeline",anchor:"transformers.ZeroShotClassificationPipeline",parameters:[{name:"args_parser",val:" = <transformers.pipelines.zero_shot_classification.ZeroShotClassificationArgumentHandler object at 0x7f45b52fe1c0>"},{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/zero_shot_classification.py#L47",parametersDescription:[{anchor:"transformers.ZeroShotClassificationPipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ZeroShotClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ZeroShotClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ZeroShotClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ZeroShotClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ZeroShotClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ZeroShotClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.ZeroShotClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ZeroShotClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.ZeroShotClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),ia=new w({props:{name:"__call__",anchor:"transformers.ZeroShotClassificationPipeline.__call__",parameters:[{name:"sequences",val:": typing.Union[str, typing.List[str]]"},{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/zero_shot_classification.py#L139",parametersDescription:[{anchor:"transformers.ZeroShotClassificationPipeline.__call__.sequences",description:`<strong>sequences</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The sequence(s) to classify, will be truncated if the model input is too large.`,name:"sequences"},{anchor:"transformers.ZeroShotClassificationPipeline.__call__.candidate_labels",description:`<strong>candidate_labels</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The set of possible class labels to classify each sequence into. Can be a single label, a string of comma-separated labels, or a list of labels.`,name:"candidate_labels"},{anchor:"transformers.ZeroShotClassificationPipeline.__call__.hypothesis_template",description:`<strong>hypothesis_template</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;This example is {}.&quot;</code>) &#x2014; The template used to turn each label into an NLI-style hypothesis. This template must include a {} or similar syntax for the candidate label to be inserted into the template. 
For example, the default template is <code>&quot;This example is {}.&quot;</code> With the candidate label <code>&quot;sports&quot;</code>, this would be fed into the model like <code>&quot;&lt;cls&gt; sequence to classify &lt;sep&gt; This example is sports . &lt;sep&gt;&quot;</code>. The default template works well in many cases, but it may be worthwhile to experiment with different templates depending on the task setting.`,name:"hypothesis_template"},{anchor:"transformers.ZeroShotClassificationPipeline.__call__.multi_label",description:`<strong>multi_label</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not multiple candidate labels can be true. If <code>False</code>, the scores are normalized such that the sum of the label likelihoods for each sequence is 1. If <code>True</code>, the labels are considered independent and probabilities are normalized for each candidate by doing a softmax of the entailment score vs. the contradiction score.`,name:"multi_label"}],returnDescription:` <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>sequence</strong> (<code>str</code>) \u2014 The sequence for which this is the output.</li> <li><strong>labels</strong> (<code>List[str]</code>) \u2014 The labels sorted by order of likelihood.</li> <li><strong>scores</strong> (<code>List[float]</code>) \u2014 The probabilities for each of the labels.</li> </ul> `,returnType:` <p>A <code>dict</code> or a list of <code>dict</code></p> `}}),pa=new y({}),ca=new w({props:{name:"class transformers.ZeroShotImageClassificationPipeline",anchor:"transformers.ZeroShotImageClassificationPipeline",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/zero_shot_image_classification.py#L29",parametersDescription:[{anchor:"transformers.ZeroShotImageClassificationPipeline.model",description:`<strong>model</strong> (<a 
href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.ZeroShotImageClassificationPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.ZeroShotImageClassificationPipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.ZeroShotImageClassificationPipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.ZeroShotImageClassificationPipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.ZeroShotImageClassificationPipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.ZeroShotImageClassificationPipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.ZeroShotImageClassificationPipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.ZeroShotImageClassificationPipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.ZeroShotImageClassificationPipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),fa=new w({props:{name:"__call__",anchor:"transformers.ZeroShotImageClassificationPipeline.__call__",parameters:[{name:"images",val:": typing.Union[str, typing.List[str], ForwardRef('Image'), typing.List[ForwardRef('Image')]]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/zero_shot_image_classification.py#L48",parametersDescription:[{anchor:"transformers.ZeroShotImageClassificationPipeline.__call__.images",description:`<strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a http link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul>`,name:"images"},{anchor:"transformers.ZeroShotImageClassificationPipeline.__call__.candidate_labels",description:`<strong>candidate_labels</strong> (<code>List[str]</code>) &#x2014; The candidate labels for this image`,name:"candidate_labels"},{anchor:"transformers.ZeroShotImageClassificationPipeline.__call__.hypothesis_template",description:`<strong>hypothesis_template</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;This is a photo of {}&quot;</code>) &#x2014; The sentence used in cunjunction with <em>candidate_labels</em> to attempt the image classification by replacing the placeholder with the candidate_labels. 
Then likelihood is estimated by using logits_per_image`,name:"hypothesis_template"}],returnDescription:` <p>A list of dictionaries containing result, one dictionnary per proposed label. The dictionaries contain the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) \u2014 The label identified by the model. It is one of the suggested <code>candidate_label</code>.</li> <li><strong>score</strong> (<code>float</code>) \u2014 The score attributed by the model for that label (between 0 and 1).</li> </ul> `}}),ha=new y({}),ua=new w({props:{name:"class transformers.Pipeline",anchor:"transformers.Pipeline",parameters:[{name:"model",val:": typing.Union[ForwardRef('PreTrainedModel'), ForwardRef('TFPreTrainedModel')]"},{name:"tokenizer",val:": typing.Optional[transformers.tokenization_utils.PreTrainedTokenizer] = None"},{name:"feature_extractor",val:": typing.Optional[ForwardRef('SequenceFeatureExtractor')] = None"},{name:"modelcard",val:": typing.Optional[transformers.modelcard.ModelCard] = None"},{name:"framework",val:": typing.Optional[str] = None"},{name:"task",val:": str = ''"},{name:"args_parser",val:": ArgumentHandler = None"},{name:"device",val:": int = -1"},{name:"binary_output",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L711",parametersDescription:[{anchor:"transformers.Pipeline.model",description:`<strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.`,name:"model"},{anchor:"transformers.Pipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.`,name:"tokenizer"},{anchor:"transformers.Pipeline.modelcard",description:`<strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.`,name:"modelcard"},{anchor:"transformers.Pipeline.framework",description:`<strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.`,name:"framework"},{anchor:"transformers.Pipeline.task",description:`<strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.`,name:"task"},{anchor:"transformers.Pipeline.num_workers",description:`<strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.`,name:"num_workers"},{anchor:"transformers.Pipeline.batch_size",description:`<strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .`,name:"batch_size"},{anchor:"transformers.Pipeline.args_parser",description:`<strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.`,name:"args_parser"},{anchor:"transformers.Pipeline.device",description:`<strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.`,name:"device"},{anchor:"transformers.Pipeline.binary_output",description:`<strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.`,name:"binary_output"}]}}),ga=new w({props:{name:"check_model_type",anchor:"transformers.Pipeline.check_model_type",parameters:[{name:"supported_models",val:": typing.Union[typing.List[str], dict]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L864",parametersDescription:[{anchor:"transformers.Pipeline.check_model_type.supported_models",description:`<strong>supported_models</strong> (<code>List[str]</code> or <code>dict</code>) &#x2014; The list of models supported by the pipeline, or a dictionary with model class values.`,name:"supported_models"}]}}),_a=new w({props:{name:"device_placement",anchor:"transformers.Pipeline.device_placement",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L806",returnDescription:` <p>Context manager</p> `}}),va=new z({props:{code:`# Explicitly ask for tensor allocation on CUDA device :0 pipe = pipeline(..., device=0) with pipe.device_placement(): # Every framework specific tensor allocation will be done on the request device output = pipe(...)`,highlighted:`<span class="hljs-comment"># Explicitly ask for tensor allocation on CUDA device :0</span> pipe = pipeline(..., device=<span class="hljs-number">0</span>) <span class="hljs-keyword">with</span> pipe.device_placement(): <span class="hljs-comment"># Every framework specific tensor allocation will be done on the request device</span> output = pipe(...)`}}),wa=new 
w({props:{name:"ensure_tensor_on_device",anchor:"transformers.Pipeline.ensure_tensor_on_device",parameters:[{name:"**inputs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L832",parametersDescription:[{anchor:"transformers.Pipeline.ensure_tensor_on_device.inputs",description:`<strong>inputs</strong> (keyword arguments that should be <code>torch.Tensor</code>, the rest is ignored) &#x2014; The tensors to place on <code>self.device</code>.`,name:"inputs"},{anchor:"transformers.Pipeline.ensure_tensor_on_device.Recursive",description:"<strong>Recursive</strong> on lists <strong>only</strong>. &#x2014;",name:"Recursive"}],returnDescription:` <p>The same as <code>inputs</code> but on the proper device.</p> `,returnType:` <p><code>Dict[str, torch.Tensor]</code></p> `}}),ba=new w({props:{name:"postprocess",anchor:"transformers.Pipeline.postprocess",parameters:[{name:"model_outputs",val:": ModelOutput"},{name:"**postprocess_parameters",val:": typing.Dict"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L920"}}),Pa=new w({props:{name:"predict",anchor:"transformers.Pipeline.predict",parameters:[{name:"X",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L800"}}),ya=new w({props:{name:"preprocess",anchor:"transformers.Pipeline.preprocess",parameters:[{name:"input_",val:": typing.Any"},{name:"**preprocess_parameters",val:": typing.Dict"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L899"}}),xa=new w({props:{name:"save_pretrained",anchor:"transformers.Pipeline.save_pretrained",parameters:[{name:"save_directory",val:": 
str"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L770",parametersDescription:[{anchor:"transformers.Pipeline.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code>) &#x2014; A path to the directory where to saved. It will be created if it doesn&#x2019;t exist.`,name:"save_directory"}]}}),$a=new w({props:{name:"transform",anchor:"transformers.Pipeline.transform",parameters:[{name:"X",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L794"}}),{c(){x=r("meta"),ie=l(),$=r("h1"),A=r("a"),fe=r("span"),f(E.$$.fragment),O=l(),Te=r("span"),Dg=a("Pipelines"),Vm=l(),Qt=r("p"),Ig=a(`The pipelines are a great and easy way to use models for inference. These pipelines are objects that abstract most of the complex code from the library, offering a simple API dedicated to several tasks, including Named Entity Recognition, Masked Language Modeling, Sentiment Analysis, Feature Extraction and Question Answering. 
See the `),Ca=r("a"),jg=a("task summary"),Sg=a(" for examples of use."),Bm=l(),za=r("p"),Mg=a("There are two categories of pipeline abstractions to be aware about:"),Zm=l(),Vt=r("ul"),yl=r("li"),wr=r("p"),Fg=a("The "),Da=r("a"),Lg=a("pipeline()"),Ug=a(" which is the most powerful object encapsulating all other pipelines."),Gg=l(),br=r("li"),xl=r("p"),Ng=a("The other task-specific pipelines:"),Og=l(),b=r("ul"),$l=r("li"),Ia=r("a"),Rg=a("AudioClassificationPipeline"),Hg=l(),El=r("li"),ja=r("a"),Wg=a("AutomaticSpeechRecognitionPipeline"),Qg=l(),ql=r("li"),Sa=r("a"),Vg=a("ConversationalPipeline"),Bg=l(),Al=r("li"),Ma=r("a"),Zg=a("FeatureExtractionPipeline"),Yg=l(),Cl=r("li"),Fa=r("a"),Xg=a("FillMaskPipeline"),Kg=l(),zl=r("li"),La=r("a"),Jg=a("ImageClassificationPipeline"),e_=l(),Dl=r("li"),Ua=r("a"),t_=a("ImageSegmentationPipeline"),n_=l(),Il=r("li"),Ga=r("a"),r_=a("ObjectDetectionPipeline"),o_=l(),jl=r("li"),Na=r("a"),s_=a("QuestionAnsweringPipeline"),a_=l(),Sl=r("li"),Oa=r("a"),i_=a("SummarizationPipeline"),l_=l(),Ml=r("li"),Ra=r("a"),p_=a("TableQuestionAnsweringPipeline"),c_=l(),Fl=r("li"),Ha=r("a"),d_=a("TextClassificationPipeline"),m_=l(),Ll=r("li"),Wa=r("a"),f_=a("TextGenerationPipeline"),h_=l(),Ul=r("li"),Qa=r("a"),u_=a("Text2TextGenerationPipeline"),g_=l(),Gl=r("li"),Va=r("a"),__=a("TokenClassificationPipeline"),v_=l(),Nl=r("li"),Ba=r("a"),w_=a("TranslationPipeline"),b_=l(),Ol=r("li"),Za=r("a"),k_=a("ZeroShotClassificationPipeline"),P_=l(),Rl=r("li"),Ya=r("a"),T_=a("ZeroShotImageClassificationPipeline"),Ym=l(),Ve=r("h2"),Bt=r("a"),Hl=r("span"),f(kr.$$.fragment),y_=l(),Wl=r("span"),x_=a("The pipeline abstraction"),Xm=l(),Zt=r("p"),$_=a("The "),Ql=r("em"),E_=a("pipeline"),q_=a(` abstraction is a wrapper around all the other available pipelines. 
It is instantiated as any other pipeline but can provide additional quality of life.`),Km=l(),Xa=r("p"),A_=a("Simple call on one item:"),Jm=l(),f(Pr.$$.fragment),ef=l(),Yt=r("p"),C_=a("If you want to use a specific model from the "),Tr=r("a"),z_=a("hub"),D_=a(` you can ignore the task if the model on the hub already defines it:`),tf=l(),f(yr.$$.fragment),nf=l(),Xt=r("p"),I_=a("To call a pipeline on many items, you can either call with a "),Vl=r("em"),j_=a("list"),S_=a("."),rf=l(),f(xr.$$.fragment),of=l(),Kt=r("p"),M_=a("To iterate of full datasets it is recommended to use a "),Bl=r("code"),F_=a("dataset"),L_=a(` directly. This means you don\u2019t need to allocate the whole dataset at once, nor do you need to do batching yourself. This should work just as fast as custom loops on GPU. If it doesn\u2019t don\u2019t hesitate to create an issue.`),sf=l(),f($r.$$.fragment),af=l(),Ka=r("p"),U_=a("For ease of use, a generator is also possible:"),lf=l(),f(Er.$$.fragment),pf=l(),R=r("div"),f(qr.$$.fragment),G_=l(),Ar=r("p"),N_=a("Utility factory method to build a "),Ja=r("a"),O_=a("Pipeline"),R_=a("."),H_=l(),Zl=r("p"),W_=a("Pipelines are made of:"),Q_=l(),Be=r("ul"),Cr=r("li"),V_=a("A "),ei=r("a"),B_=a("tokenizer"),Z_=a(" in charge of mapping raw textual input to token."),Y_=l(),zr=r("li"),X_=a("A "),ti=r("a"),K_=a("model"),J_=a(" to make predictions from the inputs."),e1=l(),Yl=r("li"),t1=a("Some (optional) post processing for enhancing model\u2019s output."),n1=l(),Xl=r("p"),r1=a("Examples:"),o1=l(),f(Dr.$$.fragment),cf=l(),Ze=r("h2"),Jt=r("a"),Kl=r("span"),f(Ir.$$.fragment),s1=l(),Jl=r("span"),a1=a("Pipeline batching"),df=l(),qe=r("p"),i1=a(`All pipelines can use batching. 
This will work whenever the pipeline uses its streaming ability (so when passing lists or `),ep=r("code"),l1=a("Dataset"),p1=a(" or "),tp=r("code"),c1=a("generator"),d1=a(")."),mf=l(),f(jr.$$.fragment),ff=l(),f(en.$$.fragment),hf=l(),f(Sr.$$.fragment),uf=l(),f(Mr.$$.fragment),gf=l(),ni=r("p"),m1=a("Example where it\u2019s most a slowdown:"),_f=l(),f(Fr.$$.fragment),vf=l(),tn=r("p"),f1=a("This is a occasional very long sentence compared to the other. In that case, the "),np=r("strong"),h1=a("whole"),u1=a(` batch will need to be 400 tokens long, so the whole batch will be [64, 400] instead of [64, 4], leading to the high slowdown. Even worse, on bigger batches, the program simply crashes.`),wf=l(),f(Lr.$$.fragment),bf=l(),ri=r("p"),g1=a(`There are no good (general) solutions for this problem, and your mileage may vary depending on your use cases. Rule of thumb:`),kf=l(),oi=r("p"),_1=a("For users, a rule of thumb is:"),Pf=l(),le=r("ul"),rp=r("li"),op=r("p"),sp=r("strong"),v1=a(`Measure performance on your load, with your hardware. Measure, measure, and keep measuring. 
Real numbers are the only way to go.`),w1=l(),ap=r("li"),ip=r("p"),b1=a("If you are latency constrained (live product doing inference), don\u2019t batch"),k1=l(),lp=r("li"),pp=r("p"),P1=a("If you are using CPU, don\u2019t batch."),T1=l(),Ur=r("li"),cp=r("p"),y1=a("If you are using throughput (you want to run your model on a bunch of static data), on GPU, then:"),x1=l(),Ye=r("ul"),dp=r("li"),$1=a(`If you have no clue about the size of the sequence_length (\u201Cnatural\u201D data), by default don\u2019t batch, measure and try tentatively to add it, add OOM checks to recover when it will fail (and it will at some point if you don\u2019t control the sequence_length.)`),E1=l(),mp=r("li"),q1=a(`If your sequence_length is super regular, then batching is more likely to be VERY interesting, measure and push it until you get OOMs.`),A1=l(),fp=r("li"),C1=a("The larger the GPU the more likely batching is going to be more interesting"),z1=l(),hp=r("li"),up=r("p"),D1=a("As soon as you enable batching, make sure you can handle OOMs nicely."),Tf=l(),Xe=r("h2"),nn=r("a"),gp=r("span"),f(Gr.$$.fragment),I1=l(),_p=r("span"),j1=a("Pipeline chunk batching"),yf=l(),ye=r("p"),vp=r("code"),S1=a("zero-shot-classification"),M1=a(" and "),wp=r("code"),F1=a("question-answering"),L1=a(` are slightly specific in the sense, that a single input might yield multiple forward pass of a model. Under normal circumstances, this would yield issues with `),bp=r("code"),U1=a("batch_size"),G1=a(" argument."),xf=l(),Ae=r("p"),N1=a("In order to circumvent this issue, both of these pipelines are a bit specific, they are "),kp=r("code"),O1=a("ChunkPipeline"),R1=a(` instead of regular `),Pp=r("code"),H1=a("Pipeline"),W1=a(". 
In short:"),$f=l(),f(Nr.$$.fragment),Ef=l(),si=r("p"),Q1=a("Now becomes:"),qf=l(),f(Or.$$.fragment),Af=l(),ai=r("p"),V1=a(`This should be very transparent to your code because the pipelines are used in the same way.`),Cf=l(),rn=r("p"),B1=a(`This is a simplified view, since the pipeline can handle automatically the batch to ! Meaning you don\u2019t have to care about how many forward passes you inputs are actually going to trigger, you can optimize the `),Tp=r("code"),Z1=a("batch_size"),Y1=a(` independently of the inputs. The caveats from the previous section still apply.`),zf=l(),Ke=r("h2"),on=r("a"),yp=r("span"),f(Rr.$$.fragment),X1=l(),xp=r("span"),K1=a("Pipeline custom code"),Df=l(),ii=r("p"),J1=a("If you want to override a specific pipeline."),If=l(),sn=r("p"),ev=a(`Don\u2019t hesitate to create an issue for your task at hand, the goal of the pipeline is to be easy to use and support most cases, so `),$p=r("code"),tv=a("transformers"),nv=a(" could maybe support your use case."),jf=l(),li=r("p"),rv=a("If you want to try simply you can:"),Sf=l(),pi=r("ul"),Ep=r("li"),ov=a("Subclass your pipeline of choice"),Mf=l(),f(Hr.$$.fragment),Ff=l(),ci=r("p"),sv=a("That should enable you to do all the custom code you want."),Lf=l(),Je=r("h2"),an=r("a"),qp=r("span"),f(Wr.$$.fragment),av=l(),Ap=r("span"),iv=a("Implementing a pipeline"),Uf=l(),di=r("p"),mi=r("a"),lv=a("Implementing a new pipeline"),Gf=l(),et=r("h2"),ln=r("a"),Cp=r("span"),f(Qr.$$.fragment),pv=l(),zp=r("span"),cv=a("The task specific pipelines"),Nf=l(),tt=r("h3"),pn=r("a"),Dp=r("span"),f(Vr.$$.fragment),dv=l(),Ip=r("span"),mv=a("AudioClassificationPipeline"),Of=l(),J=r("div"),f(Br.$$.fragment),fv=l(),Zr=r("p"),hv=a("Audio classification pipeline using any "),jp=r("code"),uv=a("AutoModelForAudioClassification"),gv=a(`. This pipeline predicts the class of a raw waveform or an audio file. 
In case of an audio file, ffmpeg should be installed to support multiple audio formats.`),_v=l(),nt=r("p"),vv=a("This pipeline can currently be loaded from "),fi=r("a"),wv=a("pipeline()"),bv=a(` using the following task identifier: `),Sp=r("code"),kv=a('"audio-classification"'),Pv=a("."),Tv=l(),Yr=r("p"),yv=a(`See the list of available models on `),Xr=r("a"),xv=a("huggingface.co/models"),$v=a("."),Ev=l(),cn=r("div"),f(Kr.$$.fragment),qv=l(),Jr=r("p"),Av=a("Classify the sequence(s) given as inputs. See the "),hi=r("a"),Cv=a("AutomaticSpeechRecognitionPipeline"),zv=a(` documentation for more information.`),Rf=l(),rt=r("h3"),dn=r("a"),Mp=r("span"),f(eo.$$.fragment),Dv=l(),Fp=r("span"),Iv=a("AutomaticSpeechRecognitionPipeline"),Hf=l(),he=r("div"),f(to.$$.fragment),jv=l(),Lp=r("p"),Sv=a("Pipeline that aims at extracting spoken text contained within some audio."),Mv=l(),Up=r("p"),Fv=a(`The input can be either a raw waveform or a audio file. In case of the audio file, ffmpeg should be installed for to support multiple audio formats`),Lv=l(),mn=r("div"),f(no.$$.fragment),Uv=l(),ro=r("p"),Gv=a("Classify the sequence(s) given as inputs. See the "),ui=r("a"),Nv=a("AutomaticSpeechRecognitionPipeline"),Ov=a(` documentation for more information.`),Wf=l(),ot=r("h3"),fn=r("a"),Gp=r("span"),f(oo.$$.fragment),Rv=l(),Np=r("span"),Hv=a("ConversationalPipeline"),Qf=l(),D=r("div"),f(so.$$.fragment),Wv=l(),xe=r("p"),Qv=a(`Utility class containing a conversation and its history. This class is meant to be used as an input to the `),gi=r("a"),Vv=a("ConversationalPipeline"),Bv=a(`. The conversation contains a number of utility function to manage the addition of new user input and generated model responses. A conversation needs to contain an unprocessed user input before being passed to the `),_i=r("a"),Zv=a("ConversationalPipeline"),Yv=a(`. 
This user input is either created when the class is instantiated, or by calling `),Op=r("code"),Xv=a('conversational_pipeline.append_response("input")'),Kv=a(" after a conversation turn."),Jv=l(),Rp=r("p"),ew=a("Usage:"),tw=l(),f(ao.$$.fragment),nw=l(),hn=r("div"),f(io.$$.fragment),rw=l(),lo=r("p"),ow=a("Add a user input to the conversation for the next round. This populates the internal "),Hp=r("code"),sw=a("new_user_input"),aw=a(" field."),iw=l(),un=r("div"),f(po.$$.fragment),lw=l(),Wp=r("p"),pw=a("Append a response to the list of generated responses."),cw=l(),Ce=r("div"),f(co.$$.fragment),dw=l(),Qp=r("p"),mw=a("Iterates over all blobs of the conversation."),fw=l(),ue=r("p"),hw=a("Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. "),Vp=r("code"),uw=a("is_user"),gw=a(" is a "),Bp=r("code"),_w=a("bool"),vw=a(`, `),Zp=r("code"),ww=a("text_chunks"),bw=a(" is a "),Yp=r("code"),kw=a("str"),Pw=a("."),Tw=l(),gn=r("div"),f(mo.$$.fragment),yw=l(),$e=r("p"),xw=a("Mark the conversation as processed (moves the content of "),Xp=r("code"),$w=a("new_user_input"),Ew=a(" to "),Kp=r("code"),qw=a("past_user_inputs"),Aw=a(`) and empties the `),Jp=r("code"),Cw=a("new_user_input"),zw=a(" field."),Vf=l(),M=r("div"),f(fo.$$.fragment),Dw=l(),ec=r("p"),Iw=a("Multi-turn conversational pipeline."),jw=l(),st=r("p"),Sw=a("This conversational pipeline can currently be loaded from "),vi=r("a"),Mw=a("pipeline()"),Fw=a(` using the following task identifier: `),tc=r("code"),Lw=a('"conversational"'),Uw=a("."),Gw=l(),ge=r("p"),Nw=a(`The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task, currently: `),nc=r("em"),Ow=a("\u2018microsoft/DialoGPT-small\u2019"),Rw=a(", "),rc=r("em"),Hw=a("\u2018microsoft/DialoGPT-medium\u2019"),Ww=a(", "),oc=r("em"),Qw=a("\u2018microsoft/DialoGPT-large\u2019"),Vw=a(`. 
See the up-to-date list of available models on `),ho=r("a"),Bw=a("huggingface.co/models"),Zw=a("."),Yw=l(),sc=r("p"),Xw=a("Usage:"),Kw=l(),f(uo.$$.fragment),Jw=l(),_n=r("div"),f(go.$$.fragment),eb=l(),ac=r("p"),tb=a("Generate responses for the conversation(s) given as inputs."),Bf=l(),at=r("h3"),vn=r("a"),ic=r("span"),f(_o.$$.fragment),nb=l(),lc=r("span"),rb=a("FeatureExtractionPipeline"),Zf=l(),ee=r("div"),f(vo.$$.fragment),ob=l(),pc=r("p"),sb=a(`Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base transformer, which can be used as features in downstream tasks.`),ab=l(),it=r("p"),ib=a("This feature extraction pipeline can currently be loaded from "),wi=r("a"),lb=a("pipeline()"),pb=a(` using the task identifier: `),cc=r("code"),cb=a('"feature-extraction"'),db=a("."),mb=l(),wo=r("p"),fb=a(`All models may be used for this pipeline. See a list of all models, including community-contributed models on `),bo=r("a"),hb=a("huggingface.co/models"),ub=a("."),gb=l(),wn=r("div"),f(ko.$$.fragment),_b=l(),dc=r("p"),vb=a("Extract the features of the input(s)."),Yf=l(),lt=r("h3"),bn=r("a"),mc=r("span"),f(Po.$$.fragment),wb=l(),fc=r("span"),bb=a("FillMaskPipeline"),Xf=l(),H=r("div"),f(To.$$.fragment),kb=l(),pt=r("p"),Pb=a("Masked language modeling prediction pipeline using any "),hc=r("code"),Tb=a("ModelWithLMHead"),yb=a(". See the "),bi=r("a"),xb=a(`masked language modeling examples`),$b=a(" for more information."),Eb=l(),ct=r("p"),qb=a("This mask filling pipeline can currently be loaded from "),ki=r("a"),Ab=a("pipeline()"),Cb=a(` using the following task identifier: `),uc=r("code"),zb=a('"fill-mask"'),Db=a("."),Ib=l(),yo=r("p"),jb=a(`The models that this pipeline can use are models that have been trained with a masked language modeling objective, which includes the bi-directional models in the library. 
See the up-to-date list of available models on `),xo=r("a"),Sb=a("huggingface.co/models"),Mb=a("."),Fb=l(),f(kn.$$.fragment),Lb=l(),Pn=r("div"),f($o.$$.fragment),Ub=l(),gc=r("p"),Gb=a("Fill the masked token in the text(s) given as inputs."),Kf=l(),dt=r("h3"),Tn=r("a"),_c=r("span"),f(Eo.$$.fragment),Nb=l(),vc=r("span"),Ob=a("ImageClassificationPipeline"),Jf=l(),te=r("div"),f(qo.$$.fragment),Rb=l(),Ao=r("p"),Hb=a("Image classification pipeline using any "),wc=r("code"),Wb=a("AutoModelForImageClassification"),Qb=a(`. This pipeline predicts the class of an image.`),Vb=l(),mt=r("p"),Bb=a("This image classification pipeline can currently be loaded from "),Pi=r("a"),Zb=a("pipeline()"),Yb=a(` using the following task identifier: `),bc=r("code"),Xb=a('"image-classification"'),Kb=a("."),Jb=l(),Co=r("p"),ek=a(`See the list of available models on `),zo=r("a"),tk=a("huggingface.co/models"),nk=a("."),rk=l(),yn=r("div"),f(Do.$$.fragment),ok=l(),kc=r("p"),sk=a("Assign labels to the image(s) passed as inputs."),eh=l(),ft=r("h3"),xn=r("a"),Pc=r("span"),f(Io.$$.fragment),ak=l(),Tc=r("span"),ik=a("ImageSegmentationPipeline"),th=l(),ne=r("div"),f(jo.$$.fragment),lk=l(),So=r("p"),pk=a("Image segmentation pipeline using any "),yc=r("code"),ck=a("AutoModelForXXXSegmentation"),dk=a(`. 
This pipeline predicts masks of objects and their classes.`),mk=l(),ht=r("p"),fk=a("This image segmentation pipeline can currently be loaded from "),Ti=r("a"),hk=a("pipeline()"),uk=a(` using the following task identifier: `),xc=r("code"),gk=a('"image-segmentation"'),_k=a("."),vk=l(),Mo=r("p"),wk=a(`See the list of available models on `),Fo=r("a"),bk=a("huggingface.co/models"),kk=a("."),Pk=l(),$n=r("div"),f(Lo.$$.fragment),Tk=l(),$c=r("p"),yk=a("Perform segmentation (detect masks & classes) in the image(s) passed as inputs."),nh=l(),ut=r("h3"),En=r("a"),Ec=r("span"),f(Uo.$$.fragment),xk=l(),qc=r("span"),$k=a("NerPipeline"),rh=l(),I=r("div"),f(Go.$$.fragment),Ek=l(),gt=r("p"),qk=a("Named Entity Recognition pipeline using any "),Ac=r("code"),Ak=a("ModelForTokenClassification"),Ck=a(". See the "),yi=r("a"),zk=a(`named entity recognition examples`),Dk=a(" for more information."),Ik=l(),_t=r("p"),jk=a("This token recognition pipeline can currently be loaded from "),xi=r("a"),Sk=a("pipeline()"),Mk=a(` using the following task identifier: `),Cc=r("code"),Fk=a('"ner"'),Lk=a(" (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous)."),Uk=l(),No=r("p"),Gk=a(`The models that this pipeline can use are models that have been fine-tuned on a token classification task. 
See the up-to-date list of available models on `),Oo=r("a"),Nk=a("huggingface.co/models"),Ok=a("."),Rk=l(),ze=r("div"),f(Ro.$$.fragment),Hk=l(),zc=r("p"),Wk=a("Override tokens from a given word that disagree to force agreement on word boundaries."),Qk=l(),Dc=r("p"),Vk=a(`Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT`),Bk=l(),qn=r("div"),f(Ho.$$.fragment),Zk=l(),Ic=r("p"),Yk=a("Fuse various numpy arrays into dicts with all the information needed for aggregation"),Xk=l(),An=r("div"),f(Wo.$$.fragment),Kk=l(),jc=r("p"),Jk=a("Find and group together the adjacent tokens with the same entity predicted."),eP=l(),Cn=r("div"),f(Qo.$$.fragment),tP=l(),Sc=r("p"),nP=a("Group together the adjacent tokens with the same entity predicted."),oh=l(),zn=r("p"),rP=a("See "),$i=r("a"),oP=a("TokenClassificationPipeline"),sP=a(" for all details."),sh=l(),vt=r("h3"),Dn=r("a"),Mc=r("span"),f(Vo.$$.fragment),aP=l(),Fc=r("span"),iP=a("ObjectDetectionPipeline"),ah=l(),re=r("div"),f(Bo.$$.fragment),lP=l(),Zo=r("p"),pP=a("Object detection pipeline using any "),Lc=r("code"),cP=a("AutoModelForObjectDetection"),dP=a(`. This pipeline predicts bounding boxes of objects and their classes.`),mP=l(),wt=r("p"),fP=a("This object detection pipeline can currently be loaded from "),Ei=r("a"),hP=a("pipeline()"),uP=a(` using the following task identifier: `),Uc=r("code"),gP=a('"object-detection"'),_P=a("."),vP=l(),Yo=r("p"),wP=a("See the list of available models on "),Xo=r("a"),bP=a("huggingface.co/models"),kP=a("."),PP=l(),In=r("div"),f(Ko.$$.fragment),TP=l(),Gc=r("p"),yP=a("Detect objects (bounding boxes & classes) in the image(s) passed as inputs."),ih=l(),bt=r("h3"),jn=r("a"),Nc=r("span"),f(Jo.$$.fragment),xP=l(),Oc=r("span"),$P=a("QuestionAnsweringPipeline"),lh=l(),j=r("div"),f(es.$$.fragment),EP=l(),kt=r("p"),qP=a("Question Answering pipeline using any "),Rc=r("code"),AP=a("ModelForQuestionAnswering"),CP=a(". 
See the "),qi=r("a"),zP=a(`question answering examples`),DP=a(" for more information."),IP=l(),Pt=r("p"),jP=a("This question answering pipeline can currently be loaded from "),Ai=r("a"),SP=a("pipeline()"),MP=a(` using the following task identifier: `),Hc=r("code"),FP=a('"question-answering"'),LP=a("."),UP=l(),ts=r("p"),GP=a(`The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the up-to-date list of available models on `),ns=r("a"),NP=a("huggingface.co/models"),OP=a("."),RP=l(),Sn=r("div"),f(rs.$$.fragment),HP=l(),Wc=r("p"),WP=a("Answer the question(s) given as inputs by using the context(s)."),QP=l(),De=r("div"),f(os.$$.fragment),VP=l(),Mn=r("p"),BP=a("QuestionAnsweringPipeline leverages the "),Qc=r("code"),ZP=a("SquadExample"),YP=a(`internally. This helper method encapsulate all the logic for converting question(s) and context(s) to `),Vc=r("code"),XP=a("SquadExample"),KP=l(),Bc=r("p"),JP=a("We currently support extractive question answering."),eT=l(),Ie=r("div"),f(ss.$$.fragment),tT=l(),as=r("p"),nT=a("Take the output of any "),Zc=r("code"),rT=a("ModelForQuestionAnswering"),oT=a(` and will generate probabilities for each span to be the actual answer.`),sT=l(),Yc=r("p"),aT=a(`In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or answer end position being before the starting position. 
The method supports output the k-best answer through the topk argument.`),iT=l(),Fn=r("div"),f(is.$$.fragment),lT=l(),Xc=r("p"),pT=a("When decoding from token probabilities, this method maps token indexes to actual word in the initial context."),ph=l(),Tt=r("h3"),Ln=r("a"),Kc=r("span"),f(ls.$$.fragment),cT=l(),Jc=r("span"),dT=a("SummarizationPipeline"),ch=l(),F=r("div"),f(ps.$$.fragment),mT=l(),ed=r("p"),fT=a("Summarize news articles and other documents."),hT=l(),yt=r("p"),uT=a("This summarizing pipeline can currently be loaded from "),Ci=r("a"),gT=a("pipeline()"),_T=a(` using the following task identifier: `),td=r("code"),vT=a('"summarization"'),wT=a("."),bT=l(),L=r("p"),kT=a(`The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is currently, \u2019`),nd=r("em"),PT=a("bart-large-cnn"),TT=a("\u2019, \u2019"),rd=r("em"),yT=a("t5-small"),xT=a("\u2019, \u2019"),od=r("em"),$T=a("t5-base"),ET=a("\u2019, \u2019"),sd=r("em"),qT=a("t5-large"),AT=a("\u2019, \u2019"),ad=r("em"),CT=a("t5-3b"),zT=a("\u2019, \u2019"),id=r("em"),DT=a("t5-11b"),IT=a(`\u2019. See the up-to-date list of available models on `),cs=r("a"),jT=a("huggingface.co/models"),ST=a("."),MT=l(),ld=r("p"),FT=a("Usage:"),LT=l(),f(ds.$$.fragment),UT=l(),Un=r("div"),f(ms.$$.fragment),GT=l(),pd=r("p"),NT=a("Summarize the text(s) given as inputs."),dh=l(),xt=r("h3"),Gn=r("a"),cd=r("span"),f(fs.$$.fragment),OT=l(),dd=r("span"),RT=a("TableQuestionAnsweringPipeline"),mh=l(),oe=r("div"),f(hs.$$.fragment),HT=l(),us=r("p"),WT=a("Table Question Answering pipeline using a "),md=r("code"),QT=a("ModelForTableQuestionAnswering"),VT=a(`. 
This pipeline is only available in PyTorch.`),BT=l(),$t=r("p"),ZT=a("This tabular question answering pipeline can currently be loaded from "),zi=r("a"),YT=a("pipeline()"),XT=a(` using the following task identifier: `),fd=r("code"),KT=a('"table-question-answering"'),JT=a("."),ey=l(),gs=r("p"),ty=a(`The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task. See the up-to-date list of available models on `),_s=r("a"),ny=a("huggingface.co/models"),ry=a("."),oy=l(),C=r("div"),f(vs.$$.fragment),sy=l(),hd=r("p"),ay=a("Answers queries according to a table. The pipeline accepts several types of inputs which are detailed below:"),iy=l(),W=r("ul"),ud=r("li"),gd=r("code"),ly=a("pipeline(table, query)"),py=l(),_d=r("li"),vd=r("code"),cy=a("pipeline(table, [query])"),dy=l(),wd=r("li"),bd=r("code"),my=a("pipeline(table=table, query=query)"),fy=l(),kd=r("li"),Pd=r("code"),hy=a("pipeline(table=table, query=[query])"),uy=l(),Td=r("li"),yd=r("code"),gy=a('pipeline({"table": table, "query": query})'),_y=l(),xd=r("li"),$d=r("code"),vy=a('pipeline({"table": table, "query": [query]})'),wy=l(),Ed=r("li"),qd=r("code"),by=a('pipeline([{"table": table, "query": query}, {"table": table, "query": query}])'),ky=l(),ws=r("p"),Py=a("The "),Ad=r("code"),Ty=a("table"),yy=a(" argument should be a dict or a DataFrame built from that dict, containing the whole table:"),xy=l(),Cd=r("p"),$y=a("Example:"),Ey=l(),f(bs.$$.fragment),qy=l(),zd=r("p"),Ay=a("This dictionary can be passed in as such, or can be converted to a pandas DataFrame:"),Cy=l(),Dd=r("p"),zy=a("Example:"),Dy=l(),f(ks.$$.fragment),fh=l(),Et=r("h3"),Nn=r("a"),Id=r("span"),f(Ps.$$.fragment),Iy=l(),jd=r("span"),jy=a("TextClassificationPipeline"),hh=l(),Q=r("div"),f(Ts.$$.fragment),Sy=l(),qt=r("p"),My=a("Text classification pipeline using any "),Sd=r("code"),Fy=a("ModelForSequenceClassification"),Ly=a(". 
See the "),Di=r("a"),Uy=a(`sequence classification examples`),Gy=a(" for more information."),Ny=l(),At=r("p"),Oy=a("This text classification pipeline can currently be loaded from "),Ii=r("a"),Ry=a("pipeline()"),Hy=a(` using the following task identifier: `),Md=r("code"),Wy=a('"sentiment-analysis"'),Qy=a(" (for classifying sequences according to positive or negative sentiments)."),Vy=l(),ys=r("p"),By=a("If multiple classification labels are available ("),Fd=r("code"),Zy=a("model.config.num_labels >= 2"),Yy=a(`), the pipeline will run a softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result.`),Xy=l(),xs=r("p"),Ky=a(`The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See the up-to-date list of available models on `),$s=r("a"),Jy=a("huggingface.co/models"),e2=a("."),t2=l(),On=r("div"),f(Es.$$.fragment),n2=l(),Ld=r("p"),r2=a("Classify the text(s) given as inputs."),uh=l(),Ct=r("h3"),Rn=r("a"),Ud=r("span"),f(qs.$$.fragment),o2=l(),Gd=r("span"),s2=a("TextGenerationPipeline"),gh=l(),se=r("div"),f(As.$$.fragment),a2=l(),Cs=r("p"),i2=a("Language generation pipeline using any "),Nd=r("code"),l2=a("ModelWithLMHead"),p2=a(`. This pipeline predicts the words that will follow a specified text prompt.`),c2=l(),zt=r("p"),d2=a("This language generation pipeline can currently be loaded from "),ji=r("a"),m2=a("pipeline()"),f2=a(` using the following task identifier: `),Od=r("code"),h2=a('"text-generation"'),u2=a("."),g2=l(),zs=r("p"),_2=a(`The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective, which includes the uni-directional models in the library (e.g. gpt2). 
See the list of available models on `),Ds=r("a"),v2=a("huggingface.co/models"),w2=a("."),b2=l(),Hn=r("div"),f(Is.$$.fragment),k2=l(),Rd=r("p"),P2=a("Complete the prompt(s) given as inputs."),_h=l(),Dt=r("h3"),Wn=r("a"),Hd=r("span"),f(js.$$.fragment),T2=l(),Wd=r("span"),y2=a("Text2TextGenerationPipeline"),vh=l(),S=r("div"),f(Ss.$$.fragment),x2=l(),Qd=r("p"),$2=a("Pipeline for text to text generation using seq2seq models."),E2=l(),It=r("p"),q2=a("This Text2TextGenerationPipeline pipeline can currently be loaded from "),Si=r("a"),A2=a("pipeline()"),C2=a(` using the following task identifier: `),Vd=r("code"),z2=a('"text2text-generation"'),D2=a("."),I2=l(),Ms=r("p"),j2=a(`The models that this pipeline can use are models that have been fine-tuned on a translation task. See the up-to-date list of available models on `),Fs=r("a"),S2=a("huggingface.co/models"),M2=a("."),F2=l(),Bd=r("p"),L2=a("Usage:"),U2=l(),f(Ls.$$.fragment),G2=l(),Qn=r("div"),f(Us.$$.fragment),N2=l(),Zd=r("p"),O2=a("Generate the output text(s) using text(s) given as inputs."),R2=l(),Vn=r("div"),f(Gs.$$.fragment),H2=l(),Yd=r("p"),W2=a("Checks whether there might be something wrong with given input with regard to the model."),wh=l(),jt=r("h3"),Bn=r("a"),Xd=r("span"),f(Ns.$$.fragment),Q2=l(),Kd=r("span"),V2=a("TokenClassificationPipeline"),bh=l(),q=r("div"),f(Os.$$.fragment),B2=l(),St=r("p"),Z2=a("Named Entity Recognition pipeline using any "),Jd=r("code"),Y2=a("ModelForTokenClassification"),X2=a(". 
See the "),Mi=r("a"),K2=a(`named entity recognition examples`),J2=a(" for more information."),e4=l(),Mt=r("p"),t4=a("This token recognition pipeline can currently be loaded from "),Fi=r("a"),n4=a("pipeline()"),r4=a(` using the following task identifier: `),em=r("code"),o4=a('"ner"'),s4=a(" (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous)."),a4=l(),Rs=r("p"),i4=a(`The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the up-to-date list of available models on `),Hs=r("a"),l4=a("huggingface.co/models"),p4=a("."),c4=l(),Zn=r("div"),f(Ws.$$.fragment),d4=l(),tm=r("p"),m4=a("Classify each token of the text(s) given as inputs."),f4=l(),je=r("div"),f(Qs.$$.fragment),h4=l(),nm=r("p"),u4=a("Override tokens from a given word that disagree to force agreement on word boundaries."),g4=l(),rm=r("p"),_4=a(`Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT`),v4=l(),Yn=r("div"),f(Vs.$$.fragment),w4=l(),om=r("p"),b4=a("Fuse various numpy arrays into dicts with all the information needed for aggregation"),k4=l(),Xn=r("div"),f(Bs.$$.fragment),P4=l(),sm=r("p"),T4=a("Find and group together the adjacent tokens with the same entity predicted."),y4=l(),Kn=r("div"),f(Zs.$$.fragment),x4=l(),am=r("p"),$4=a("Group together the adjacent tokens with the same entity predicted."),kh=l(),Ft=r("h3"),Jn=r("a"),im=r("span"),f(Ys.$$.fragment),E4=l(),lm=r("span"),q4=a("TranslationPipeline"),Ph=l(),U=r("div"),f(Xs.$$.fragment),A4=l(),pm=r("p"),C4=a("Translates from one language to another."),z4=l(),Lt=r("p"),D4=a("This translation pipeline can currently be loaded from "),Li=r("a"),I4=a("pipeline()"),j4=a(` using the following task identifier: `),cm=r("code"),S4=a('"translation_xx_to_yy"'),M4=a("."),F4=l(),Ks=r("p"),L4=a(`The models that this pipeline can use are models that have been fine-tuned on a translation task. 
See the up-to-date list of available models on `),Js=r("a"),U4=a("huggingface.co/models"),G4=a("."),N4=l(),dm=r("p"),O4=a("Usage:"),R4=l(),f(ea.$$.fragment),H4=l(),er=r("div"),f(ta.$$.fragment),W4=l(),mm=r("p"),Q4=a("Translate the text(s) given as inputs."),Th=l(),Ut=r("h3"),tr=r("a"),fm=r("span"),f(na.$$.fragment),V4=l(),hm=r("span"),B4=a("ZeroShotClassificationPipeline"),yh=l(),V=r("div"),f(ra.$$.fragment),Z4=l(),oa=r("p"),Y4=a("NLI-based zero-shot classification pipeline using a "),um=r("code"),X4=a("ModelForSequenceClassification"),K4=a(` trained on NLI (natural language inference) tasks.`),J4=l(),Ee=r("p"),e0=a(`Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis pair and passed to the pretrained model. Then, the logit for `),gm=r("em"),t0=a("entailment"),n0=a(` is taken as the logit for the candidate label being valid. Any NLI model can be used, but the id of the `),_m=r("em"),r0=a("entailment"),o0=a(` label must be included in the model config\u2019s :attr:`),vm=r("em"),s0=a("~transformers.PretrainedConfig.label2id"),a0=a("."),i0=l(),Gt=r("p"),l0=a("This NLI pipeline can currently be loaded from "),Ui=r("a"),p0=a("pipeline()"),c0=a(` using the following task identifier: `),wm=r("code"),d0=a('"zero-shot-classification"'),m0=a("."),f0=l(),sa=r("p"),h0=a(`The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list of available models on `),aa=r("a"),u0=a("huggingface.co/models"),g0=a("."),_0=l(),nr=r("div"),f(ia.$$.fragment),v0=l(),la=r("p"),w0=a("Classify the sequence(s) given as inputs. 
See the "),Gi=r("a"),b0=a("ZeroShotClassificationPipeline"),k0=a(` documentation for more information.`),xh=l(),Nt=r("h3"),rr=r("a"),bm=r("span"),f(pa.$$.fragment),P0=l(),km=r("span"),T0=a("ZeroShotImageClassificationPipeline"),$h=l(),ae=r("div"),f(ca.$$.fragment),y0=l(),Ot=r("p"),x0=a("Zero shot image classification pipeline using "),Pm=r("code"),$0=a("CLIPModel"),E0=a(`. This pipeline predicts the class of an image when you provide an image and a set of `),Tm=r("code"),q0=a("candidate_labels"),A0=a("."),C0=l(),Rt=r("p"),z0=a("This image classification pipeline can currently be loaded from "),Ni=r("a"),D0=a("pipeline()"),I0=a(` using the following task identifier: `),ym=r("code"),j0=a('"zero-shot-image-classification"'),S0=a("."),M0=l(),da=r("p"),F0=a(`See the list of available models on `),ma=r("a"),L0=a("huggingface.co/models"),U0=a("."),G0=l(),or=r("div"),f(fa.$$.fragment),N0=l(),xm=r("p"),O0=a("Assign labels to the image(s) passed as inputs."),Eh=l(),Ht=r("h2"),sr=r("a"),$m=r("span"),f(ha.$$.fragment),R0=l(),Oi=r("span"),H0=a("Parent class: "),Em=r("code"),W0=a("Pipeline"),qh=l(),P=r("div"),f(ua.$$.fragment),Q0=l(),qm=r("p"),V0=a(`The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across different pipelines.`),B0=l(),Am=r("p"),Z0=a(`Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following operations:`),Y0=l(),Cm=r("p"),X0=a("Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output"),K0=l(),zm=r("p"),J0=a("Pipeline supports running on CPU or GPU through the device argument (see below)."),ex=l(),_e=r("p"),tx=a("Some pipeline, like for instance "),Ri=r("a"),nx=a("FeatureExtractionPipeline"),rx=a(" ("),Dm=r("code"),ox=a("'feature-extraction'"),sx=a(`) output large tensor object as nested-lists. 
In order to avoid dumping such large structure as textual data we provide the `),Im=r("code"),ax=a("binary_output"),ix=a(` constructor argument. If set to `),jm=r("code"),lx=a("True"),px=a(", the output will be stored in the pickle format."),cx=l(),ar=r("div"),f(ga.$$.fragment),dx=l(),Sm=r("p"),mx=a("Check if the model class is in supported by the pipeline."),fx=l(),ve=r("div"),f(_a.$$.fragment),hx=l(),Mm=r("p"),ux=a("Context Manager allowing tensor allocation on the user-specified device in framework agnostic way."),gx=l(),Fm=r("p"),_x=a("Examples:"),vx=l(),f(va.$$.fragment),wx=l(),ir=r("div"),f(wa.$$.fragment),bx=l(),Lm=r("p"),kx=a("Ensure PyTorch tensors are on the specified device."),Px=l(),lr=r("div"),f(ba.$$.fragment),Tx=l(),ka=r("p"),yx=a("Postprocess will receive the raw outputs of the "),Um=r("code"),xx=a("_forward"),$x=a(` method, generally tensors, and reformat them into something more friendly. Generally it will output a list or a dict or results (containing just strings and numbers).`),Ex=l(),pr=r("div"),f(Pa.$$.fragment),qx=l(),Ta=r("p"),Ax=a("Scikit / Keras interface to transformers\u2019 pipelines. This method will forward to "),Gm=r("strong"),Cx=a("call"),zx=a("()."),Dx=l(),cr=r("div"),f(ya.$$.fragment),Ix=l(),Wt=r("p"),jx=a("Preprocess will take the "),Nm=r("code"),Sx=a("input_"),Mx=a(` of a specific pipeline and return a dictionnary of everything necessary for `),Om=r("code"),Fx=a("_forward"),Lx=a(" to run properly. It should contain at least one tensor, but might have arbitrary other items."),Ux=l(),dr=r("div"),f(xa.$$.fragment),Gx=l(),Rm=r("p"),Nx=a("Save the pipeline\u2019s model and tokenizer."),Ox=l(),mr=r("div"),f($a.$$.fragment),Rx=l(),Ea=r("p"),Hx=a("Scikit / Keras interface to transformers\u2019 pipelines. 
This method will forward to "),Hm=r("strong"),Wx=a("call"),Qx=a("()."),this.h()},l(t){const d=xA('[data-svelte="svelte-1phssyn"]',document.head);x=o(d,"META",{name:!0,content:!0}),d.forEach(n),ie=p(t),$=o(t,"H1",{class:!0});var qa=s($);A=o(qa,"A",{id:!0,class:!0,href:!0});var Wm=s(A);fe=o(Wm,"SPAN",{});var Bx=s(fe);h(E.$$.fragment,Bx),Bx.forEach(n),Wm.forEach(n),O=p(qa),Te=o(qa,"SPAN",{});var Zx=s(Te);Dg=i(Zx,"Pipelines"),Zx.forEach(n),qa.forEach(n),Vm=p(t),Qt=o(t,"P",{});var Ch=s(Qt);Ig=i(Ch,`The pipelines are a great and easy way to use models for inference. These pipelines are objects that abstract most of the complex code from the library, offering a simple API dedicated to several tasks, including Named Entity Recognition, Masked Language Modeling, Sentiment Analysis, Feature Extraction and Question Answering. See the `),Ca=o(Ch,"A",{href:!0});var Yx=s(Ca);jg=i(Yx,"task summary"),Yx.forEach(n),Sg=i(Ch," for examples of use."),Ch.forEach(n),Bm=p(t),za=o(t,"P",{});var Xx=s(za);Mg=i(Xx,"There are two categories of pipeline abstractions to be aware about:"),Xx.forEach(n),Zm=p(t),Vt=o(t,"UL",{});var zh=s(Vt);yl=o(zh,"LI",{});var Kx=s(yl);wr=o(Kx,"P",{});var Dh=s(wr);Fg=i(Dh,"The "),Da=o(Dh,"A",{href:!0});var Jx=s(Da);Lg=i(Jx,"pipeline()"),Jx.forEach(n),Ug=i(Dh," which is the most powerful object encapsulating all other pipelines."),Dh.forEach(n),Kx.forEach(n),Gg=p(zh),br=o(zh,"LI",{});var Ih=s(br);xl=o(Ih,"P",{});var e3=s(xl);Ng=i(e3,"The other task-specific pipelines:"),e3.forEach(n),Og=p(Ih),b=o(Ih,"UL",{});var k=s(b);$l=o(k,"LI",{});var t3=s($l);Ia=o(t3,"A",{href:!0});var n3=s(Ia);Rg=i(n3,"AudioClassificationPipeline"),n3.forEach(n),t3.forEach(n),Hg=p(k),El=o(k,"LI",{});var r3=s(El);ja=o(r3,"A",{href:!0});var o3=s(ja);Wg=i(o3,"AutomaticSpeechRecognitionPipeline"),o3.forEach(n),r3.forEach(n),Qg=p(k),ql=o(k,"LI",{});var s3=s(ql);Sa=o(s3,"A",{href:!0});var a3=s(Sa);Vg=i(a3,"ConversationalPipeline"),a3.forEach(n),s3.forEach(n),Bg=p(k),Al=o(k,"LI",{});var 
i3=s(Al);Ma=o(i3,"A",{href:!0});var l3=s(Ma);Zg=i(l3,"FeatureExtractionPipeline"),l3.forEach(n),i3.forEach(n),Yg=p(k),Cl=o(k,"LI",{});var p3=s(Cl);Fa=o(p3,"A",{href:!0});var c3=s(Fa);Xg=i(c3,"FillMaskPipeline"),c3.forEach(n),p3.forEach(n),Kg=p(k),zl=o(k,"LI",{});var d3=s(zl);La=o(d3,"A",{href:!0});var m3=s(La);Jg=i(m3,"ImageClassificationPipeline"),m3.forEach(n),d3.forEach(n),e_=p(k),Dl=o(k,"LI",{});var f3=s(Dl);Ua=o(f3,"A",{href:!0});var h3=s(Ua);t_=i(h3,"ImageSegmentationPipeline"),h3.forEach(n),f3.forEach(n),n_=p(k),Il=o(k,"LI",{});var u3=s(Il);Ga=o(u3,"A",{href:!0});var g3=s(Ga);r_=i(g3,"ObjectDetectionPipeline"),g3.forEach(n),u3.forEach(n),o_=p(k),jl=o(k,"LI",{});var _3=s(jl);Na=o(_3,"A",{href:!0});var v3=s(Na);s_=i(v3,"QuestionAnsweringPipeline"),v3.forEach(n),_3.forEach(n),a_=p(k),Sl=o(k,"LI",{});var w3=s(Sl);Oa=o(w3,"A",{href:!0});var b3=s(Oa);i_=i(b3,"SummarizationPipeline"),b3.forEach(n),w3.forEach(n),l_=p(k),Ml=o(k,"LI",{});var k3=s(Ml);Ra=o(k3,"A",{href:!0});var P3=s(Ra);p_=i(P3,"TableQuestionAnsweringPipeline"),P3.forEach(n),k3.forEach(n),c_=p(k),Fl=o(k,"LI",{});var T3=s(Fl);Ha=o(T3,"A",{href:!0});var y3=s(Ha);d_=i(y3,"TextClassificationPipeline"),y3.forEach(n),T3.forEach(n),m_=p(k),Ll=o(k,"LI",{});var x3=s(Ll);Wa=o(x3,"A",{href:!0});var $3=s(Wa);f_=i($3,"TextGenerationPipeline"),$3.forEach(n),x3.forEach(n),h_=p(k),Ul=o(k,"LI",{});var E3=s(Ul);Qa=o(E3,"A",{href:!0});var q3=s(Qa);u_=i(q3,"Text2TextGenerationPipeline"),q3.forEach(n),E3.forEach(n),g_=p(k),Gl=o(k,"LI",{});var A3=s(Gl);Va=o(A3,"A",{href:!0});var C3=s(Va);__=i(C3,"TokenClassificationPipeline"),C3.forEach(n),A3.forEach(n),v_=p(k),Nl=o(k,"LI",{});var z3=s(Nl);Ba=o(z3,"A",{href:!0});var D3=s(Ba);w_=i(D3,"TranslationPipeline"),D3.forEach(n),z3.forEach(n),b_=p(k),Ol=o(k,"LI",{});var I3=s(Ol);Za=o(I3,"A",{href:!0});var j3=s(Za);k_=i(j3,"ZeroShotClassificationPipeline"),j3.forEach(n),I3.forEach(n),P_=p(k),Rl=o(k,"LI",{});var S3=s(Rl);Ya=o(S3,"A",{href:!0});var 
M3=s(Ya);T_=i(M3,"ZeroShotImageClassificationPipeline"),M3.forEach(n),S3.forEach(n),k.forEach(n),Ih.forEach(n),zh.forEach(n),Ym=p(t),Ve=o(t,"H2",{class:!0});var jh=s(Ve);Bt=o(jh,"A",{id:!0,class:!0,href:!0});var F3=s(Bt);Hl=o(F3,"SPAN",{});var L3=s(Hl);h(kr.$$.fragment,L3),L3.forEach(n),F3.forEach(n),y_=p(jh),Wl=o(jh,"SPAN",{});var U3=s(Wl);x_=i(U3,"The pipeline abstraction"),U3.forEach(n),jh.forEach(n),Xm=p(t),Zt=o(t,"P",{});var Sh=s(Zt);$_=i(Sh,"The "),Ql=o(Sh,"EM",{});var G3=s(Ql);E_=i(G3,"pipeline"),G3.forEach(n),q_=i(Sh,` abstraction is a wrapper around all the other available pipelines. It is instantiated as any other pipeline but can provide additional quality of life.`),Sh.forEach(n),Km=p(t),Xa=o(t,"P",{});var N3=s(Xa);A_=i(N3,"Simple call on one item:"),N3.forEach(n),Jm=p(t),h(Pr.$$.fragment,t),ef=p(t),Yt=o(t,"P",{});var Mh=s(Yt);C_=i(Mh,"If you want to use a specific model from the "),Tr=o(Mh,"A",{href:!0,rel:!0});var O3=s(Tr);z_=i(O3,"hub"),O3.forEach(n),D_=i(Mh,` you can ignore the task if the model on the hub already defines it:`),Mh.forEach(n),tf=p(t),h(yr.$$.fragment,t),nf=p(t),Xt=o(t,"P",{});var Fh=s(Xt);I_=i(Fh,"To call a pipeline on many items, you can either call with a "),Vl=o(Fh,"EM",{});var R3=s(Vl);j_=i(R3,"list"),R3.forEach(n),S_=i(Fh,"."),Fh.forEach(n),rf=p(t),h(xr.$$.fragment,t),of=p(t),Kt=o(t,"P",{});var Lh=s(Kt);M_=i(Lh,"To iterate of full datasets it is recommended to use a "),Bl=o(Lh,"CODE",{});var H3=s(Bl);F_=i(H3,"dataset"),H3.forEach(n),L_=i(Lh,` directly. This means you don\u2019t need to allocate the whole dataset at once, nor do you need to do batching yourself. This should work just as fast as custom loops on GPU. 
If it doesn\u2019t don\u2019t hesitate to create an issue.`),Lh.forEach(n),sf=p(t),h($r.$$.fragment,t),af=p(t),Ka=o(t,"P",{});var W3=s(Ka);U_=i(W3,"For ease of use, a generator is also possible:"),W3.forEach(n),lf=p(t),h(Er.$$.fragment,t),pf=p(t),R=o(t,"DIV",{class:!0});var we=s(R);h(qr.$$.fragment,we),G_=p(we),Ar=o(we,"P",{});var Uh=s(Ar);N_=i(Uh,"Utility factory method to build a "),Ja=o(Uh,"A",{href:!0});var Q3=s(Ja);O_=i(Q3,"Pipeline"),Q3.forEach(n),R_=i(Uh,"."),Uh.forEach(n),H_=p(we),Zl=o(we,"P",{});var V3=s(Zl);W_=i(V3,"Pipelines are made of:"),V3.forEach(n),Q_=p(we),Be=o(we,"UL",{});var Hi=s(Be);Cr=o(Hi,"LI",{});var Gh=s(Cr);V_=i(Gh,"A "),ei=o(Gh,"A",{href:!0});var B3=s(ei);B_=i(B3,"tokenizer"),B3.forEach(n),Z_=i(Gh," in charge of mapping raw textual input to token."),Gh.forEach(n),Y_=p(Hi),zr=o(Hi,"LI",{});var Nh=s(zr);X_=i(Nh,"A "),ti=o(Nh,"A",{href:!0});var Z3=s(ti);K_=i(Z3,"model"),Z3.forEach(n),J_=i(Nh," to make predictions from the inputs."),Nh.forEach(n),e1=p(Hi),Yl=o(Hi,"LI",{});var Y3=s(Yl);t1=i(Y3,"Some (optional) post processing for enhancing model\u2019s output."),Y3.forEach(n),Hi.forEach(n),n1=p(we),Xl=o(we,"P",{});var X3=s(Xl);r1=i(X3,"Examples:"),X3.forEach(n),o1=p(we),h(Dr.$$.fragment,we),we.forEach(n),cf=p(t),Ze=o(t,"H2",{class:!0});var Oh=s(Ze);Jt=o(Oh,"A",{id:!0,class:!0,href:!0});var K3=s(Jt);Kl=o(K3,"SPAN",{});var J3=s(Kl);h(Ir.$$.fragment,J3),J3.forEach(n),K3.forEach(n),s1=p(Oh),Jl=o(Oh,"SPAN",{});var e$=s(Jl);a1=i(e$,"Pipeline batching"),e$.forEach(n),Oh.forEach(n),df=p(t),qe=o(t,"P",{});var Wi=s(qe);i1=i(Wi,`All pipelines can use batching. 
This will work whenever the pipeline uses its streaming ability (so when passing lists or `),ep=o(Wi,"CODE",{});var t$=s(ep);l1=i(t$,"Dataset"),t$.forEach(n),p1=i(Wi," or "),tp=o(Wi,"CODE",{});var n$=s(tp);c1=i(n$,"generator"),n$.forEach(n),d1=i(Wi,")."),Wi.forEach(n),mf=p(t),h(jr.$$.fragment,t),ff=p(t),h(en.$$.fragment,t),hf=p(t),h(Sr.$$.fragment,t),uf=p(t),h(Mr.$$.fragment,t),gf=p(t),ni=o(t,"P",{});var r$=s(ni);m1=i(r$,"Example where it\u2019s most a slowdown:"),r$.forEach(n),_f=p(t),h(Fr.$$.fragment,t),vf=p(t),tn=o(t,"P",{});var Rh=s(tn);f1=i(Rh,"This is a occasional very long sentence compared to the other. In that case, the "),np=o(Rh,"STRONG",{});var o$=s(np);h1=i(o$,"whole"),o$.forEach(n),u1=i(Rh,` batch will need to be 400 tokens long, so the whole batch will be [64, 400] instead of [64, 4], leading to the high slowdown. Even worse, on bigger batches, the program simply crashes.`),Rh.forEach(n),wf=p(t),h(Lr.$$.fragment,t),bf=p(t),ri=o(t,"P",{});var s$=s(ri);g1=i(s$,`There are no good (general) solutions for this problem, and your mileage may vary depending on your use cases. Rule of thumb:`),s$.forEach(n),kf=p(t),oi=o(t,"P",{});var a$=s(oi);_1=i(a$,"For users, a rule of thumb is:"),a$.forEach(n),Pf=p(t),le=o(t,"UL",{});var Se=s(le);rp=o(Se,"LI",{});var i$=s(rp);op=o(i$,"P",{});var l$=s(op);sp=o(l$,"STRONG",{});var p$=s(sp);v1=i(p$,`Measure performance on your load, with your hardware. Measure, measure, and keep measuring. 
Real numbers are the only way to go.`),p$.forEach(n),l$.forEach(n),i$.forEach(n),w1=p(Se),ap=o(Se,"LI",{});var c$=s(ap);ip=o(c$,"P",{});var d$=s(ip);b1=i(d$,"If you are latency constrained (live product doing inference), don\u2019t batch"),d$.forEach(n),c$.forEach(n),k1=p(Se),lp=o(Se,"LI",{});var m$=s(lp);pp=o(m$,"P",{});var f$=s(pp);P1=i(f$,"If you are using CPU, don\u2019t batch."),f$.forEach(n),m$.forEach(n),T1=p(Se),Ur=o(Se,"LI",{});var Hh=s(Ur);cp=o(Hh,"P",{});var h$=s(cp);y1=i(h$,"If you are using throughput (you want to run your model on a bunch of static data), on GPU, then:"),h$.forEach(n),x1=p(Hh),Ye=o(Hh,"UL",{});var Qi=s(Ye);dp=o(Qi,"LI",{});var u$=s(dp);$1=i(u$,`If you have no clue about the size of the sequence_length (\u201Cnatural\u201D data), by default don\u2019t batch, measure and try tentatively to add it, add OOM checks to recover when it will fail (and it will at some point if you don\u2019t control the sequence_length.)`),u$.forEach(n),E1=p(Qi),mp=o(Qi,"LI",{});var g$=s(mp);q1=i(g$,`If your sequence_length is super regular, then batching is more likely to be VERY interesting, measure and push it until you get OOMs.`),g$.forEach(n),A1=p(Qi),fp=o(Qi,"LI",{});var _$=s(fp);C1=i(_$,"The larger the GPU the more likely batching is going to be more interesting"),_$.forEach(n),Qi.forEach(n),Hh.forEach(n),z1=p(Se),hp=o(Se,"LI",{});var v$=s(hp);up=o(v$,"P",{});var w$=s(up);D1=i(w$,"As soon as you enable batching, make sure you can handle OOMs nicely."),w$.forEach(n),v$.forEach(n),Se.forEach(n),Tf=p(t),Xe=o(t,"H2",{class:!0});var Wh=s(Xe);nn=o(Wh,"A",{id:!0,class:!0,href:!0});var b$=s(nn);gp=o(b$,"SPAN",{});var k$=s(gp);h(Gr.$$.fragment,k$),k$.forEach(n),b$.forEach(n),I1=p(Wh),_p=o(Wh,"SPAN",{});var P$=s(_p);j1=i(P$,"Pipeline chunk batching"),P$.forEach(n),Wh.forEach(n),yf=p(t),ye=o(t,"P",{});var Aa=s(ye);vp=o(Aa,"CODE",{});var T$=s(vp);S1=i(T$,"zero-shot-classification"),T$.forEach(n),M1=i(Aa," and "),wp=o(Aa,"CODE",{});var 
y$=s(wp);F1=i(y$,"question-answering"),y$.forEach(n),L1=i(Aa,` are slightly specific in the sense, that a single input might yield multiple forward pass of a model. Under normal circumstances, this would yield issues with `),bp=o(Aa,"CODE",{});var x$=s(bp);U1=i(x$,"batch_size"),x$.forEach(n),G1=i(Aa," argument."),Aa.forEach(n),xf=p(t),Ae=o(t,"P",{});var Vi=s(Ae);N1=i(Vi,"In order to circumvent this issue, both of these pipelines are a bit specific, they are "),kp=o(Vi,"CODE",{});var $$=s(kp);O1=i($$,"ChunkPipeline"),$$.forEach(n),R1=i(Vi,` instead of regular `),Pp=o(Vi,"CODE",{});var E$=s(Pp);H1=i(E$,"Pipeline"),E$.forEach(n),W1=i(Vi,". In short:"),Vi.forEach(n),$f=p(t),h(Nr.$$.fragment,t),Ef=p(t),si=o(t,"P",{});var q$=s(si);Q1=i(q$,"Now becomes:"),q$.forEach(n),qf=p(t),h(Or.$$.fragment,t),Af=p(t),ai=o(t,"P",{});var A$=s(ai);V1=i(A$,`This should be very transparent to your code because the pipelines are used in the same way.`),A$.forEach(n),Cf=p(t),rn=o(t,"P",{});var Qh=s(rn);B1=i(Qh,`This is a simplified view, since the pipeline can handle automatically the batch to ! Meaning you don\u2019t have to care about how many forward passes you inputs are actually going to trigger, you can optimize the `),Tp=o(Qh,"CODE",{});var C$=s(Tp);Z1=i(C$,"batch_size"),C$.forEach(n),Y1=i(Qh,` independently of the inputs. 
The caveats from the previous section still apply.`),Qh.forEach(n),zf=p(t),Ke=o(t,"H2",{class:!0});var Vh=s(Ke);on=o(Vh,"A",{id:!0,class:!0,href:!0});var z$=s(on);yp=o(z$,"SPAN",{});var D$=s(yp);h(Rr.$$.fragment,D$),D$.forEach(n),z$.forEach(n),X1=p(Vh),xp=o(Vh,"SPAN",{});var I$=s(xp);K1=i(I$,"Pipeline custom code"),I$.forEach(n),Vh.forEach(n),Df=p(t),ii=o(t,"P",{});var j$=s(ii);J1=i(j$,"If you want to override a specific pipeline."),j$.forEach(n),If=p(t),sn=o(t,"P",{});var Bh=s(sn);ev=i(Bh,`Don\u2019t hesitate to create an issue for your task at hand, the goal of the pipeline is to be easy to use and support most cases, so `),$p=o(Bh,"CODE",{});var S$=s($p);tv=i(S$,"transformers"),S$.forEach(n),nv=i(Bh," could maybe support your use case."),Bh.forEach(n),jf=p(t),li=o(t,"P",{});var M$=s(li);rv=i(M$,"If you want to try simply you can:"),M$.forEach(n),Sf=p(t),pi=o(t,"UL",{});var F$=s(pi);Ep=o(F$,"LI",{});var L$=s(Ep);ov=i(L$,"Subclass your pipeline of choice"),L$.forEach(n),F$.forEach(n),Mf=p(t),h(Hr.$$.fragment,t),Ff=p(t),ci=o(t,"P",{});var U$=s(ci);sv=i(U$,"That should enable you to do all the custom code you want."),U$.forEach(n),Lf=p(t),Je=o(t,"H2",{class:!0});var Zh=s(Je);an=o(Zh,"A",{id:!0,class:!0,href:!0});var G$=s(an);qp=o(G$,"SPAN",{});var N$=s(qp);h(Wr.$$.fragment,N$),N$.forEach(n),G$.forEach(n),av=p(Zh),Ap=o(Zh,"SPAN",{});var O$=s(Ap);iv=i(O$,"Implementing a pipeline"),O$.forEach(n),Zh.forEach(n),Uf=p(t),di=o(t,"P",{});var R$=s(di);mi=o(R$,"A",{href:!0});var H$=s(mi);lv=i(H$,"Implementing a new pipeline"),H$.forEach(n),R$.forEach(n),Gf=p(t),et=o(t,"H2",{class:!0});var Yh=s(et);ln=o(Yh,"A",{id:!0,class:!0,href:!0});var W$=s(ln);Cp=o(W$,"SPAN",{});var Q$=s(Cp);h(Qr.$$.fragment,Q$),Q$.forEach(n),W$.forEach(n),pv=p(Yh),zp=o(Yh,"SPAN",{});var V$=s(zp);cv=i(V$,"The task specific pipelines"),V$.forEach(n),Yh.forEach(n),Nf=p(t),tt=o(t,"H3",{class:!0});var Xh=s(tt);pn=o(Xh,"A",{id:!0,class:!0,href:!0});var B$=s(pn);Dp=o(B$,"SPAN",{});var 
Z$=s(Dp);h(Vr.$$.fragment,Z$),Z$.forEach(n),B$.forEach(n),dv=p(Xh),Ip=o(Xh,"SPAN",{});var Y$=s(Ip);mv=i(Y$,"AudioClassificationPipeline"),Y$.forEach(n),Xh.forEach(n),Of=p(t),J=o(t,"DIV",{class:!0});var Me=s(J);h(Br.$$.fragment,Me),fv=p(Me),Zr=o(Me,"P",{});var Kh=s(Zr);hv=i(Kh,"Audio classification pipeline using any "),jp=o(Kh,"CODE",{});var X$=s(jp);uv=i(X$,"AutoModelForAudioClassification"),X$.forEach(n),gv=i(Kh,`. This pipeline predicts the class of a raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio formats.`),Kh.forEach(n),_v=p(Me),nt=o(Me,"P",{});var Bi=s(nt);vv=i(Bi,"This pipeline can currently be loaded from "),fi=o(Bi,"A",{href:!0});var K$=s(fi);wv=i(K$,"pipeline()"),K$.forEach(n),bv=i(Bi,` using the following task identifier: `),Sp=o(Bi,"CODE",{});var J$=s(Sp);kv=i(J$,'"audio-classification"'),J$.forEach(n),Pv=i(Bi,"."),Bi.forEach(n),Tv=p(Me),Yr=o(Me,"P",{});var Jh=s(Yr);yv=i(Jh,`See the list of available models on `),Xr=o(Jh,"A",{href:!0,rel:!0});var eE=s(Xr);xv=i(eE,"huggingface.co/models"),eE.forEach(n),$v=i(Jh,"."),Jh.forEach(n),Ev=p(Me),cn=o(Me,"DIV",{class:!0});var eu=s(cn);h(Kr.$$.fragment,eu),qv=p(eu),Jr=o(eu,"P",{});var tu=s(Jr);Av=i(tu,"Classify the sequence(s) given as inputs. 
See the "),hi=o(tu,"A",{href:!0});var tE=s(hi);Cv=i(tE,"AutomaticSpeechRecognitionPipeline"),tE.forEach(n),zv=i(tu,` documentation for more information.`),tu.forEach(n),eu.forEach(n),Me.forEach(n),Rf=p(t),rt=o(t,"H3",{class:!0});var nu=s(rt);dn=o(nu,"A",{id:!0,class:!0,href:!0});var nE=s(dn);Mp=o(nE,"SPAN",{});var rE=s(Mp);h(eo.$$.fragment,rE),rE.forEach(n),nE.forEach(n),Dv=p(nu),Fp=o(nu,"SPAN",{});var oE=s(Fp);Iv=i(oE,"AutomaticSpeechRecognitionPipeline"),oE.forEach(n),nu.forEach(n),Hf=p(t),he=o(t,"DIV",{class:!0});var fr=s(he);h(to.$$.fragment,fr),jv=p(fr),Lp=o(fr,"P",{});var sE=s(Lp);Sv=i(sE,"Pipeline that aims at extracting spoken text contained within some audio."),sE.forEach(n),Mv=p(fr),Up=o(fr,"P",{});var aE=s(Up);Fv=i(aE,`The input can be either a raw waveform or a audio file. In case of the audio file, ffmpeg should be installed for to support multiple audio formats`),aE.forEach(n),Lv=p(fr),mn=o(fr,"DIV",{class:!0});var ru=s(mn);h(no.$$.fragment,ru),Uv=p(ru),ro=o(ru,"P",{});var ou=s(ro);Gv=i(ou,"Classify the sequence(s) given as inputs. See the "),ui=o(ou,"A",{href:!0});var iE=s(ui);Nv=i(iE,"AutomaticSpeechRecognitionPipeline"),iE.forEach(n),Ov=i(ou,` documentation for more information.`),ou.forEach(n),ru.forEach(n),fr.forEach(n),Wf=p(t),ot=o(t,"H3",{class:!0});var su=s(ot);fn=o(su,"A",{id:!0,class:!0,href:!0});var lE=s(fn);Gp=o(lE,"SPAN",{});var pE=s(Gp);h(oo.$$.fragment,pE),pE.forEach(n),lE.forEach(n),Rv=p(su),Np=o(su,"SPAN",{});var cE=s(Np);Hv=i(cE,"ConversationalPipeline"),cE.forEach(n),su.forEach(n),Qf=p(t),D=o(t,"DIV",{class:!0});var B=s(D);h(so.$$.fragment,B),Wv=p(B),xe=o(B,"P",{});var hr=s(xe);Qv=i(hr,`Utility class containing a conversation and its history. This class is meant to be used as an input to the `),gi=o(hr,"A",{href:!0});var dE=s(gi);Vv=i(dE,"ConversationalPipeline"),dE.forEach(n),Bv=i(hr,`. The conversation contains a number of utility function to manage the addition of new user input and generated model responses. 
A conversation needs to contain an unprocessed user input before being passed to the `),_i=o(hr,"A",{href:!0});var mE=s(_i);Zv=i(mE,"ConversationalPipeline"),mE.forEach(n),Yv=i(hr,`. This user input is either created when the class is instantiated, or by calling `),Op=o(hr,"CODE",{});var fE=s(Op);Xv=i(fE,'conversational_pipeline.append_response("input")'),fE.forEach(n),Kv=i(hr," after a conversation turn."),hr.forEach(n),Jv=p(B),Rp=o(B,"P",{});var hE=s(Rp);ew=i(hE,"Usage:"),hE.forEach(n),tw=p(B),h(ao.$$.fragment,B),nw=p(B),hn=o(B,"DIV",{class:!0});var au=s(hn);h(io.$$.fragment,au),rw=p(au),lo=o(au,"P",{});var iu=s(lo);ow=i(iu,"Add a user input to the conversation for the next round. This populates the internal "),Hp=o(iu,"CODE",{});var uE=s(Hp);sw=i(uE,"new_user_input"),uE.forEach(n),aw=i(iu," field."),iu.forEach(n),au.forEach(n),iw=p(B),un=o(B,"DIV",{class:!0});var lu=s(un);h(po.$$.fragment,lu),lw=p(lu),Wp=o(lu,"P",{});var gE=s(Wp);pw=i(gE,"Append a response to the list of generated responses."),gE.forEach(n),lu.forEach(n),cw=p(B),Ce=o(B,"DIV",{class:!0});var Zi=s(Ce);h(co.$$.fragment,Zi),dw=p(Zi),Qp=o(Zi,"P",{});var _E=s(Qp);mw=i(_E,"Iterates over all blobs of the conversation."),_E.forEach(n),fw=p(Zi),ue=o(Zi,"P",{});var Fe=s(ue);hw=i(Fe,"Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. 
"),Vp=o(Fe,"CODE",{});var vE=s(Vp);uw=i(vE,"is_user"),vE.forEach(n),gw=i(Fe," is a "),Bp=o(Fe,"CODE",{});var wE=s(Bp);_w=i(wE,"bool"),wE.forEach(n),vw=i(Fe,`, `),Zp=o(Fe,"CODE",{});var bE=s(Zp);ww=i(bE,"text_chunks"),bE.forEach(n),bw=i(Fe," is a "),Yp=o(Fe,"CODE",{});var kE=s(Yp);kw=i(kE,"str"),kE.forEach(n),Pw=i(Fe,"."),Fe.forEach(n),Zi.forEach(n),Tw=p(B),gn=o(B,"DIV",{class:!0});var pu=s(gn);h(mo.$$.fragment,pu),yw=p(pu),$e=o(pu,"P",{});var ur=s($e);xw=i(ur,"Mark the conversation as processed (moves the content of "),Xp=o(ur,"CODE",{});var PE=s(Xp);$w=i(PE,"new_user_input"),PE.forEach(n),Ew=i(ur," to "),Kp=o(ur,"CODE",{});var TE=s(Kp);qw=i(TE,"past_user_inputs"),TE.forEach(n),Aw=i(ur,`) and empties the `),Jp=o(ur,"CODE",{});var yE=s(Jp);Cw=i(yE,"new_user_input"),yE.forEach(n),zw=i(ur," field."),ur.forEach(n),pu.forEach(n),B.forEach(n),Vf=p(t),M=o(t,"DIV",{class:!0});var pe=s(M);h(fo.$$.fragment,pe),Dw=p(pe),ec=o(pe,"P",{});var xE=s(ec);Iw=i(xE,"Multi-turn conversational pipeline."),xE.forEach(n),jw=p(pe),st=o(pe,"P",{});var Yi=s(st);Sw=i(Yi,"This conversational pipeline can currently be loaded from "),vi=o(Yi,"A",{href:!0});var $E=s(vi);Mw=i($E,"pipeline()"),$E.forEach(n),Fw=i(Yi,` using the following task identifier: `),tc=o(Yi,"CODE",{});var EE=s(tc);Lw=i(EE,'"conversational"'),EE.forEach(n),Uw=i(Yi,"."),Yi.forEach(n),Gw=p(pe),ge=o(pe,"P",{});var Le=s(ge);Nw=i(Le,`The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task, currently: `),nc=o(Le,"EM",{});var qE=s(nc);Ow=i(qE,"\u2018microsoft/DialoGPT-small\u2019"),qE.forEach(n),Rw=i(Le,", "),rc=o(Le,"EM",{});var AE=s(rc);Hw=i(AE,"\u2018microsoft/DialoGPT-medium\u2019"),AE.forEach(n),Ww=i(Le,", "),oc=o(Le,"EM",{});var CE=s(oc);Qw=i(CE,"\u2018microsoft/DialoGPT-large\u2019"),CE.forEach(n),Vw=i(Le,`. 
See the up-to-date list of available models on `),ho=o(Le,"A",{href:!0,rel:!0});var zE=s(ho);Bw=i(zE,"huggingface.co/models"),zE.forEach(n),Zw=i(Le,"."),Le.forEach(n),Yw=p(pe),sc=o(pe,"P",{});var DE=s(sc);Xw=i(DE,"Usage:"),DE.forEach(n),Kw=p(pe),h(uo.$$.fragment,pe),Jw=p(pe),_n=o(pe,"DIV",{class:!0});var cu=s(_n);h(go.$$.fragment,cu),eb=p(cu),ac=o(cu,"P",{});var IE=s(ac);tb=i(IE,"Generate responses for the conversation(s) given as inputs."),IE.forEach(n),cu.forEach(n),pe.forEach(n),Bf=p(t),at=o(t,"H3",{class:!0});var du=s(at);vn=o(du,"A",{id:!0,class:!0,href:!0});var jE=s(vn);ic=o(jE,"SPAN",{});var SE=s(ic);h(_o.$$.fragment,SE),SE.forEach(n),jE.forEach(n),nb=p(du),lc=o(du,"SPAN",{});var ME=s(lc);rb=i(ME,"FeatureExtractionPipeline"),ME.forEach(n),du.forEach(n),Zf=p(t),ee=o(t,"DIV",{class:!0});var Ue=s(ee);h(vo.$$.fragment,Ue),ob=p(Ue),pc=o(Ue,"P",{});var FE=s(pc);sb=i(FE,`Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base transformer, which can be used as features in downstream tasks.`),FE.forEach(n),ab=p(Ue),it=o(Ue,"P",{});var Xi=s(it);ib=i(Xi,"This feature extraction pipeline can currently be loaded from "),wi=o(Xi,"A",{href:!0});var LE=s(wi);lb=i(LE,"pipeline()"),LE.forEach(n),pb=i(Xi,` using the task identifier: `),cc=o(Xi,"CODE",{});var UE=s(cc);cb=i(UE,'"feature-extraction"'),UE.forEach(n),db=i(Xi,"."),Xi.forEach(n),mb=p(Ue),wo=o(Ue,"P",{});var mu=s(wo);fb=i(mu,`All models may be used for this pipeline. 
See a list of all models, including community-contributed models on `),bo=o(mu,"A",{href:!0,rel:!0});var GE=s(bo);hb=i(GE,"huggingface.co/models"),GE.forEach(n),ub=i(mu,"."),mu.forEach(n),gb=p(Ue),wn=o(Ue,"DIV",{class:!0});var fu=s(wn);h(ko.$$.fragment,fu),_b=p(fu),dc=o(fu,"P",{});var NE=s(dc);vb=i(NE,"Extract the features of the input(s)."),NE.forEach(n),fu.forEach(n),Ue.forEach(n),Yf=p(t),lt=o(t,"H3",{class:!0});var hu=s(lt);bn=o(hu,"A",{id:!0,class:!0,href:!0});var OE=s(bn);mc=o(OE,"SPAN",{});var RE=s(mc);h(Po.$$.fragment,RE),RE.forEach(n),OE.forEach(n),wb=p(hu),fc=o(hu,"SPAN",{});var HE=s(fc);bb=i(HE,"FillMaskPipeline"),HE.forEach(n),hu.forEach(n),Xf=p(t),H=o(t,"DIV",{class:!0});var be=s(H);h(To.$$.fragment,be),kb=p(be),pt=o(be,"P",{});var Ki=s(pt);Pb=i(Ki,"Masked language modeling prediction pipeline using any "),hc=o(Ki,"CODE",{});var WE=s(hc);Tb=i(WE,"ModelWithLMHead"),WE.forEach(n),yb=i(Ki,". See the "),bi=o(Ki,"A",{href:!0});var QE=s(bi);xb=i(QE,`masked language modeling examples`),QE.forEach(n),$b=i(Ki," for more information."),Ki.forEach(n),Eb=p(be),ct=o(be,"P",{});var Ji=s(ct);qb=i(Ji,"This mask filling pipeline can currently be loaded from "),ki=o(Ji,"A",{href:!0});var VE=s(ki);Ab=i(VE,"pipeline()"),VE.forEach(n),Cb=i(Ji,` using the following task identifier: `),uc=o(Ji,"CODE",{});var BE=s(uc);zb=i(BE,'"fill-mask"'),BE.forEach(n),Db=i(Ji,"."),Ji.forEach(n),Ib=p(be),yo=o(be,"P",{});var uu=s(yo);jb=i(uu,`The models that this pipeline can use are models that have been trained with a masked language modeling objective, which includes the bi-directional models in the library. 
See the up-to-date list of available models on `),xo=o(uu,"A",{href:!0,rel:!0});var ZE=s(xo);Sb=i(ZE,"huggingface.co/models"),ZE.forEach(n),Mb=i(uu,"."),uu.forEach(n),Fb=p(be),h(kn.$$.fragment,be),Lb=p(be),Pn=o(be,"DIV",{class:!0});var gu=s(Pn);h($o.$$.fragment,gu),Ub=p(gu),gc=o(gu,"P",{});var YE=s(gc);Gb=i(YE,"Fill the masked token in the text(s) given as inputs."),YE.forEach(n),gu.forEach(n),be.forEach(n),Kf=p(t),dt=o(t,"H3",{class:!0});var _u=s(dt);Tn=o(_u,"A",{id:!0,class:!0,href:!0});var XE=s(Tn);_c=o(XE,"SPAN",{});var KE=s(_c);h(Eo.$$.fragment,KE),KE.forEach(n),XE.forEach(n),Nb=p(_u),vc=o(_u,"SPAN",{});var JE=s(vc);Ob=i(JE,"ImageClassificationPipeline"),JE.forEach(n),_u.forEach(n),Jf=p(t),te=o(t,"DIV",{class:!0});var Ge=s(te);h(qo.$$.fragment,Ge),Rb=p(Ge),Ao=o(Ge,"P",{});var vu=s(Ao);Hb=i(vu,"Image classification pipeline using any "),wc=o(vu,"CODE",{});var e6=s(wc);Wb=i(e6,"AutoModelForImageClassification"),e6.forEach(n),Qb=i(vu,`. This pipeline predicts the class of an image.`),vu.forEach(n),Vb=p(Ge),mt=o(Ge,"P",{});var el=s(mt);Bb=i(el,"This image classification pipeline can currently be loaded from "),Pi=o(el,"A",{href:!0});var t6=s(Pi);Zb=i(t6,"pipeline()"),t6.forEach(n),Yb=i(el,` using the following task identifier: `),bc=o(el,"CODE",{});var n6=s(bc);Xb=i(n6,'"image-classification"'),n6.forEach(n),Kb=i(el,"."),el.forEach(n),Jb=p(Ge),Co=o(Ge,"P",{});var wu=s(Co);ek=i(wu,`See the list of available models on `),zo=o(wu,"A",{href:!0,rel:!0});var r6=s(zo);tk=i(r6,"huggingface.co/models"),r6.forEach(n),nk=i(wu,"."),wu.forEach(n),rk=p(Ge),yn=o(Ge,"DIV",{class:!0});var bu=s(yn);h(Do.$$.fragment,bu),ok=p(bu),kc=o(bu,"P",{});var o6=s(kc);sk=i(o6,"Assign labels to the image(s) passed as inputs."),o6.forEach(n),bu.forEach(n),Ge.forEach(n),eh=p(t),ft=o(t,"H3",{class:!0});var ku=s(ft);xn=o(ku,"A",{id:!0,class:!0,href:!0});var s6=s(xn);Pc=o(s6,"SPAN",{});var a6=s(Pc);h(Io.$$.fragment,a6),a6.forEach(n),s6.forEach(n),ak=p(ku),Tc=o(ku,"SPAN",{});var 
i6=s(Tc);ik=i(i6,"ImageSegmentationPipeline"),i6.forEach(n),ku.forEach(n),th=p(t),ne=o(t,"DIV",{class:!0});var Ne=s(ne);h(jo.$$.fragment,Ne),lk=p(Ne),So=o(Ne,"P",{});var Pu=s(So);pk=i(Pu,"Image segmentation pipeline using any "),yc=o(Pu,"CODE",{});var l6=s(yc);ck=i(l6,"AutoModelForXXXSegmentation"),l6.forEach(n),dk=i(Pu,`. This pipeline predicts masks of objects and their classes.`),Pu.forEach(n),mk=p(Ne),ht=o(Ne,"P",{});var tl=s(ht);fk=i(tl,"This image segmentation pipeline can currently be loaded from "),Ti=o(tl,"A",{href:!0});var p6=s(Ti);hk=i(p6,"pipeline()"),p6.forEach(n),uk=i(tl,` using the following task identifier: `),xc=o(tl,"CODE",{});var c6=s(xc);gk=i(c6,'"image-segmentation"'),c6.forEach(n),_k=i(tl,"."),tl.forEach(n),vk=p(Ne),Mo=o(Ne,"P",{});var Tu=s(Mo);wk=i(Tu,`See the list of available models on `),Fo=o(Tu,"A",{href:!0,rel:!0});var d6=s(Fo);bk=i(d6,"huggingface.co/models"),d6.forEach(n),kk=i(Tu,"."),Tu.forEach(n),Pk=p(Ne),$n=o(Ne,"DIV",{class:!0});var yu=s($n);h(Lo.$$.fragment,yu),Tk=p(yu),$c=o(yu,"P",{});var m6=s($c);yk=i(m6,"Perform segmentation (detect masks & classes) in the image(s) passed as inputs."),m6.forEach(n),yu.forEach(n),Ne.forEach(n),nh=p(t),ut=o(t,"H3",{class:!0});var xu=s(ut);En=o(xu,"A",{id:!0,class:!0,href:!0});var f6=s(En);Ec=o(f6,"SPAN",{});var h6=s(Ec);h(Uo.$$.fragment,h6),h6.forEach(n),f6.forEach(n),xk=p(xu),qc=o(xu,"SPAN",{});var u6=s(qc);$k=i(u6,"NerPipeline"),u6.forEach(n),xu.forEach(n),rh=p(t),I=o(t,"DIV",{class:!0});var Z=s(I);h(Go.$$.fragment,Z),Ek=p(Z),gt=o(Z,"P",{});var nl=s(gt);qk=i(nl,"Named Entity Recognition pipeline using any "),Ac=o(nl,"CODE",{});var g6=s(Ac);Ak=i(g6,"ModelForTokenClassification"),g6.forEach(n),Ck=i(nl,". 
See the "),yi=o(nl,"A",{href:!0});var _6=s(yi);zk=i(_6,`named entity recognition examples`),_6.forEach(n),Dk=i(nl," for more information."),nl.forEach(n),Ik=p(Z),_t=o(Z,"P",{});var rl=s(_t);jk=i(rl,"This token recognition pipeline can currently be loaded from "),xi=o(rl,"A",{href:!0});var v6=s(xi);Sk=i(v6,"pipeline()"),v6.forEach(n),Mk=i(rl,` using the following task identifier: `),Cc=o(rl,"CODE",{});var w6=s(Cc);Fk=i(w6,'"ner"'),w6.forEach(n),Lk=i(rl," (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous)."),rl.forEach(n),Uk=p(Z),No=o(Z,"P",{});var $u=s(No);Gk=i($u,`The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the up-to-date list of available models on `),Oo=o($u,"A",{href:!0,rel:!0});var b6=s(Oo);Nk=i(b6,"huggingface.co/models"),b6.forEach(n),Ok=i($u,"."),$u.forEach(n),Rk=p(Z),ze=o(Z,"DIV",{class:!0});var ol=s(ze);h(Ro.$$.fragment,ol),Hk=p(ol),zc=o(ol,"P",{});var k6=s(zc);Wk=i(k6,"Override tokens from a given word that disagree to force agreement on word boundaries."),k6.forEach(n),Qk=p(ol),Dc=o(ol,"P",{});var P6=s(Dc);Vk=i(P6,`Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT`),P6.forEach(n),ol.forEach(n),Bk=p(Z),qn=o(Z,"DIV",{class:!0});var Eu=s(qn);h(Ho.$$.fragment,Eu),Zk=p(Eu),Ic=o(Eu,"P",{});var T6=s(Ic);Yk=i(T6,"Fuse various numpy arrays into dicts with all the information needed for aggregation"),T6.forEach(n),Eu.forEach(n),Xk=p(Z),An=o(Z,"DIV",{class:!0});var qu=s(An);h(Wo.$$.fragment,qu),Kk=p(qu),jc=o(qu,"P",{});var y6=s(jc);Jk=i(y6,"Find and group together the adjacent tokens with the same entity predicted."),y6.forEach(n),qu.forEach(n),eP=p(Z),Cn=o(Z,"DIV",{class:!0});var Au=s(Cn);h(Qo.$$.fragment,Au),tP=p(Au),Sc=o(Au,"P",{});var x6=s(Sc);nP=i(x6,"Group together the adjacent tokens with the same entity 
predicted."),x6.forEach(n),Au.forEach(n),Z.forEach(n),oh=p(t),zn=o(t,"P",{});var Cu=s(zn);rP=i(Cu,"See "),$i=o(Cu,"A",{href:!0});var $6=s($i);oP=i($6,"TokenClassificationPipeline"),$6.forEach(n),sP=i(Cu," for all details."),Cu.forEach(n),sh=p(t),vt=o(t,"H3",{class:!0});var zu=s(vt);Dn=o(zu,"A",{id:!0,class:!0,href:!0});var E6=s(Dn);Mc=o(E6,"SPAN",{});var q6=s(Mc);h(Vo.$$.fragment,q6),q6.forEach(n),E6.forEach(n),aP=p(zu),Fc=o(zu,"SPAN",{});var A6=s(Fc);iP=i(A6,"ObjectDetectionPipeline"),A6.forEach(n),zu.forEach(n),ah=p(t),re=o(t,"DIV",{class:!0});var Oe=s(re);h(Bo.$$.fragment,Oe),lP=p(Oe),Zo=o(Oe,"P",{});var Du=s(Zo);pP=i(Du,"Object detection pipeline using any "),Lc=o(Du,"CODE",{});var C6=s(Lc);cP=i(C6,"AutoModelForObjectDetection"),C6.forEach(n),dP=i(Du,`. This pipeline predicts bounding boxes of objects and their classes.`),Du.forEach(n),mP=p(Oe),wt=o(Oe,"P",{});var sl=s(wt);fP=i(sl,"This object detection pipeline can currently be loaded from "),Ei=o(sl,"A",{href:!0});var z6=s(Ei);hP=i(z6,"pipeline()"),z6.forEach(n),uP=i(sl,` using the following task identifier: `),Uc=o(sl,"CODE",{});var D6=s(Uc);gP=i(D6,'"object-detection"'),D6.forEach(n),_P=i(sl,"."),sl.forEach(n),vP=p(Oe),Yo=o(Oe,"P",{});var Iu=s(Yo);wP=i(Iu,"See the list of available models on "),Xo=o(Iu,"A",{href:!0,rel:!0});var I6=s(Xo);bP=i(I6,"huggingface.co/models"),I6.forEach(n),kP=i(Iu,"."),Iu.forEach(n),PP=p(Oe),In=o(Oe,"DIV",{class:!0});var ju=s(In);h(Ko.$$.fragment,ju),TP=p(ju),Gc=o(ju,"P",{});var j6=s(Gc);yP=i(j6,"Detect objects (bounding boxes & classes) in the image(s) passed as inputs."),j6.forEach(n),ju.forEach(n),Oe.forEach(n),ih=p(t),bt=o(t,"H3",{class:!0});var Su=s(bt);jn=o(Su,"A",{id:!0,class:!0,href:!0});var S6=s(jn);Nc=o(S6,"SPAN",{});var M6=s(Nc);h(Jo.$$.fragment,M6),M6.forEach(n),S6.forEach(n),xP=p(Su),Oc=o(Su,"SPAN",{});var F6=s(Oc);$P=i(F6,"QuestionAnsweringPipeline"),F6.forEach(n),Su.forEach(n),lh=p(t),j=o(t,"DIV",{class:!0});var Y=s(j);h(es.$$.fragment,Y),EP=p(Y),kt=o(Y,"P",{});var 
al=s(kt);qP=i(al,"Question Answering pipeline using any "),Rc=o(al,"CODE",{});var L6=s(Rc);AP=i(L6,"ModelForQuestionAnswering"),L6.forEach(n),CP=i(al,". See the "),qi=o(al,"A",{href:!0});var U6=s(qi);zP=i(U6,`question answering examples`),U6.forEach(n),DP=i(al," for more information."),al.forEach(n),IP=p(Y),Pt=o(Y,"P",{});var il=s(Pt);jP=i(il,"This question answering pipeline can currently be loaded from "),Ai=o(il,"A",{href:!0});var G6=s(Ai);SP=i(G6,"pipeline()"),G6.forEach(n),MP=i(il,` using the following task identifier: `),Hc=o(il,"CODE",{});var N6=s(Hc);FP=i(N6,'"question-answering"'),N6.forEach(n),LP=i(il,"."),il.forEach(n),UP=p(Y),ts=o(Y,"P",{});var Mu=s(ts);GP=i(Mu,`The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the up-to-date list of available models on `),ns=o(Mu,"A",{href:!0,rel:!0});var O6=s(ns);NP=i(O6,"huggingface.co/models"),O6.forEach(n),OP=i(Mu,"."),Mu.forEach(n),RP=p(Y),Sn=o(Y,"DIV",{class:!0});var Fu=s(Sn);h(rs.$$.fragment,Fu),HP=p(Fu),Wc=o(Fu,"P",{});var R6=s(Wc);WP=i(R6,"Answer the question(s) given as inputs by using the context(s)."),R6.forEach(n),Fu.forEach(n),QP=p(Y),De=o(Y,"DIV",{class:!0});var ll=s(De);h(os.$$.fragment,ll),VP=p(ll),Mn=o(ll,"P",{});var Qm=s(Mn);BP=i(Qm,"QuestionAnsweringPipeline leverages the "),Qc=o(Qm,"CODE",{});var H6=s(Qc);ZP=i(H6,"SquadExample"),H6.forEach(n),YP=i(Qm,`internally. 
This helper method encapsulate all the logic for converting question(s) and context(s) to `),Vc=o(Qm,"CODE",{});var W6=s(Vc);XP=i(W6,"SquadExample"),W6.forEach(n),Qm.forEach(n),KP=p(ll),Bc=o(ll,"P",{});var Q6=s(Bc);JP=i(Q6,"We currently support extractive question answering."),Q6.forEach(n),ll.forEach(n),eT=p(Y),Ie=o(Y,"DIV",{class:!0});var pl=s(Ie);h(ss.$$.fragment,pl),tT=p(pl),as=o(pl,"P",{});var Lu=s(as);nT=i(Lu,"Take the output of any "),Zc=o(Lu,"CODE",{});var V6=s(Zc);rT=i(V6,"ModelForQuestionAnswering"),V6.forEach(n),oT=i(Lu,` and will generate probabilities for each span to be the actual answer.`),Lu.forEach(n),sT=p(pl),Yc=o(pl,"P",{});var B6=s(Yc);aT=i(B6,`In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or answer end position being before the starting position. The method supports output the k-best answer through the topk argument.`),B6.forEach(n),pl.forEach(n),iT=p(Y),Fn=o(Y,"DIV",{class:!0});var Uu=s(Fn);h(is.$$.fragment,Uu),lT=p(Uu),Xc=o(Uu,"P",{});var Z6=s(Xc);pT=i(Z6,"When decoding from token probabilities, this method maps token indexes to actual word in the initial context."),Z6.forEach(n),Uu.forEach(n),Y.forEach(n),ph=p(t),Tt=o(t,"H3",{class:!0});var Gu=s(Tt);Ln=o(Gu,"A",{id:!0,class:!0,href:!0});var Y6=s(Ln);Kc=o(Y6,"SPAN",{});var X6=s(Kc);h(ls.$$.fragment,X6),X6.forEach(n),Y6.forEach(n),cT=p(Gu),Jc=o(Gu,"SPAN",{});var K6=s(Jc);dT=i(K6,"SummarizationPipeline"),K6.forEach(n),Gu.forEach(n),ch=p(t),F=o(t,"DIV",{class:!0});var ce=s(F);h(ps.$$.fragment,ce),mT=p(ce),ed=o(ce,"P",{});var J6=s(ed);fT=i(J6,"Summarize news articles and other documents."),J6.forEach(n),hT=p(ce),yt=o(ce,"P",{});var cl=s(yt);uT=i(cl,"This summarizing pipeline can currently be loaded from "),Ci=o(cl,"A",{href:!0});var e5=s(Ci);gT=i(e5,"pipeline()"),e5.forEach(n),_T=i(cl,` using the following task identifier: `),td=o(cl,"CODE",{});var 
t5=s(td);vT=i(t5,'"summarization"'),t5.forEach(n),wT=i(cl,"."),cl.forEach(n),bT=p(ce),L=o(ce,"P",{});var X=s(L);kT=i(X,`The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is currently, \u2019`),nd=o(X,"EM",{});var n5=s(nd);PT=i(n5,"bart-large-cnn"),n5.forEach(n),TT=i(X,"\u2019, \u2019"),rd=o(X,"EM",{});var r5=s(rd);yT=i(r5,"t5-small"),r5.forEach(n),xT=i(X,"\u2019, \u2019"),od=o(X,"EM",{});var o5=s(od);$T=i(o5,"t5-base"),o5.forEach(n),ET=i(X,"\u2019, \u2019"),sd=o(X,"EM",{});var s5=s(sd);qT=i(s5,"t5-large"),s5.forEach(n),AT=i(X,"\u2019, \u2019"),ad=o(X,"EM",{});var a5=s(ad);CT=i(a5,"t5-3b"),a5.forEach(n),zT=i(X,"\u2019, \u2019"),id=o(X,"EM",{});var i5=s(id);DT=i(i5,"t5-11b"),i5.forEach(n),IT=i(X,`\u2019. See the up-to-date list of available models on `),cs=o(X,"A",{href:!0,rel:!0});var l5=s(cs);jT=i(l5,"huggingface.co/models"),l5.forEach(n),ST=i(X,"."),X.forEach(n),MT=p(ce),ld=o(ce,"P",{});var p5=s(ld);FT=i(p5,"Usage:"),p5.forEach(n),LT=p(ce),h(ds.$$.fragment,ce),UT=p(ce),Un=o(ce,"DIV",{class:!0});var Nu=s(Un);h(ms.$$.fragment,Nu),GT=p(Nu),pd=o(Nu,"P",{});var c5=s(pd);NT=i(c5,"Summarize the text(s) given as inputs."),c5.forEach(n),Nu.forEach(n),ce.forEach(n),dh=p(t),xt=o(t,"H3",{class:!0});var Ou=s(xt);Gn=o(Ou,"A",{id:!0,class:!0,href:!0});var d5=s(Gn);cd=o(d5,"SPAN",{});var m5=s(cd);h(fs.$$.fragment,m5),m5.forEach(n),d5.forEach(n),OT=p(Ou),dd=o(Ou,"SPAN",{});var f5=s(dd);RT=i(f5,"TableQuestionAnsweringPipeline"),f5.forEach(n),Ou.forEach(n),mh=p(t),oe=o(t,"DIV",{class:!0});var Re=s(oe);h(hs.$$.fragment,Re),HT=p(Re),us=o(Re,"P",{});var Ru=s(us);WT=i(Ru,"Table Question Answering pipeline using a "),md=o(Ru,"CODE",{});var h5=s(md);QT=i(h5,"ModelForTableQuestionAnswering"),h5.forEach(n),VT=i(Ru,`. 
This pipeline is only available in PyTorch.`),Ru.forEach(n),BT=p(Re),$t=o(Re,"P",{});var dl=s($t);ZT=i(dl,"This tabular question answering pipeline can currently be loaded from "),zi=o(dl,"A",{href:!0});var u5=s(zi);YT=i(u5,"pipeline()"),u5.forEach(n),XT=i(dl,` using the following task identifier: `),fd=o(dl,"CODE",{});var g5=s(fd);KT=i(g5,'"table-question-answering"'),g5.forEach(n),JT=i(dl,"."),dl.forEach(n),ey=p(Re),gs=o(Re,"P",{});var Hu=s(gs);ty=i(Hu,`The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task. See the up-to-date list of available models on `),_s=o(Hu,"A",{href:!0,rel:!0});var _5=s(_s);ny=i(_5,"huggingface.co/models"),_5.forEach(n),ry=i(Hu,"."),Hu.forEach(n),oy=p(Re),C=o(Re,"DIV",{class:!0});var G=s(C);h(vs.$$.fragment,G),sy=p(G),hd=o(G,"P",{});var v5=s(hd);ay=i(v5,"Answers queries according to a table. The pipeline accepts several types of inputs which are detailed below:"),v5.forEach(n),iy=p(G),W=o(G,"UL",{});var de=s(W);ud=o(de,"LI",{});var w5=s(ud);gd=o(w5,"CODE",{});var b5=s(gd);ly=i(b5,"pipeline(table, query)"),b5.forEach(n),w5.forEach(n),py=p(de),_d=o(de,"LI",{});var k5=s(_d);vd=o(k5,"CODE",{});var P5=s(vd);cy=i(P5,"pipeline(table, [query])"),P5.forEach(n),k5.forEach(n),dy=p(de),wd=o(de,"LI",{});var T5=s(wd);bd=o(T5,"CODE",{});var y5=s(bd);my=i(y5,"pipeline(table=table, query=query)"),y5.forEach(n),T5.forEach(n),fy=p(de),kd=o(de,"LI",{});var x5=s(kd);Pd=o(x5,"CODE",{});var $5=s(Pd);hy=i($5,"pipeline(table=table, query=[query])"),$5.forEach(n),x5.forEach(n),uy=p(de),Td=o(de,"LI",{});var E5=s(Td);yd=o(E5,"CODE",{});var q5=s(yd);gy=i(q5,'pipeline({"table": table, "query": query})'),q5.forEach(n),E5.forEach(n),_y=p(de),xd=o(de,"LI",{});var A5=s(xd);$d=o(A5,"CODE",{});var C5=s($d);vy=i(C5,'pipeline({"table": table, "query": [query]})'),C5.forEach(n),A5.forEach(n),wy=p(de),Ed=o(de,"LI",{});var z5=s(Ed);qd=o(z5,"CODE",{});var D5=s(qd);by=i(D5,'pipeline([{"table": table, "query": query}, 
{"table": table, "query": query}])'),D5.forEach(n),z5.forEach(n),de.forEach(n),ky=p(G),ws=o(G,"P",{});var Wu=s(ws);Py=i(Wu,"The "),Ad=o(Wu,"CODE",{});var I5=s(Ad);Ty=i(I5,"table"),I5.forEach(n),yy=i(Wu," argument should be a dict or a DataFrame built from that dict, containing the whole table:"),Wu.forEach(n),xy=p(G),Cd=o(G,"P",{});var j5=s(Cd);$y=i(j5,"Example:"),j5.forEach(n),Ey=p(G),h(bs.$$.fragment,G),qy=p(G),zd=o(G,"P",{});var S5=s(zd);Ay=i(S5,"This dictionary can be passed in as such, or can be converted to a pandas DataFrame:"),S5.forEach(n),Cy=p(G),Dd=o(G,"P",{});var M5=s(Dd);zy=i(M5,"Example:"),M5.forEach(n),Dy=p(G),h(ks.$$.fragment,G),G.forEach(n),Re.forEach(n),fh=p(t),Et=o(t,"H3",{class:!0});var Qu=s(Et);Nn=o(Qu,"A",{id:!0,class:!0,href:!0});var F5=s(Nn);Id=o(F5,"SPAN",{});var L5=s(Id);h(Ps.$$.fragment,L5),L5.forEach(n),F5.forEach(n),Iy=p(Qu),jd=o(Qu,"SPAN",{});var U5=s(jd);jy=i(U5,"TextClassificationPipeline"),U5.forEach(n),Qu.forEach(n),hh=p(t),Q=o(t,"DIV",{class:!0});var ke=s(Q);h(Ts.$$.fragment,ke),Sy=p(ke),qt=o(ke,"P",{});var ml=s(qt);My=i(ml,"Text classification pipeline using any "),Sd=o(ml,"CODE",{});var G5=s(Sd);Fy=i(G5,"ModelForSequenceClassification"),G5.forEach(n),Ly=i(ml,". 
See the "),Di=o(ml,"A",{href:!0});var N5=s(Di);Uy=i(N5,`sequence classification examples`),N5.forEach(n),Gy=i(ml," for more information."),ml.forEach(n),Ny=p(ke),At=o(ke,"P",{});var fl=s(At);Oy=i(fl,"This text classification pipeline can currently be loaded from "),Ii=o(fl,"A",{href:!0});var O5=s(Ii);Ry=i(O5,"pipeline()"),O5.forEach(n),Hy=i(fl,` using the following task identifier: `),Md=o(fl,"CODE",{});var R5=s(Md);Wy=i(R5,'"sentiment-analysis"'),R5.forEach(n),Qy=i(fl," (for classifying sequences according to positive or negative sentiments)."),fl.forEach(n),Vy=p(ke),ys=o(ke,"P",{});var Vu=s(ys);By=i(Vu,"If multiple classification labels are available ("),Fd=o(Vu,"CODE",{});var H5=s(Fd);Zy=i(H5,"model.config.num_labels >= 2"),H5.forEach(n),Yy=i(Vu,`), the pipeline will run a softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result.`),Vu.forEach(n),Xy=p(ke),xs=o(ke,"P",{});var Bu=s(xs);Ky=i(Bu,`The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See the up-to-date list of available models on `),$s=o(Bu,"A",{href:!0,rel:!0});var W5=s($s);Jy=i(W5,"huggingface.co/models"),W5.forEach(n),e2=i(Bu,"."),Bu.forEach(n),t2=p(ke),On=o(ke,"DIV",{class:!0});var Zu=s(On);h(Es.$$.fragment,Zu),n2=p(Zu),Ld=o(Zu,"P",{});var Q5=s(Ld);r2=i(Q5,"Classify the text(s) given as inputs."),Q5.forEach(n),Zu.forEach(n),ke.forEach(n),uh=p(t),Ct=o(t,"H3",{class:!0});var Yu=s(Ct);Rn=o(Yu,"A",{id:!0,class:!0,href:!0});var V5=s(Rn);Ud=o(V5,"SPAN",{});var B5=s(Ud);h(qs.$$.fragment,B5),B5.forEach(n),V5.forEach(n),o2=p(Yu),Gd=o(Yu,"SPAN",{});var Z5=s(Gd);s2=i(Z5,"TextGenerationPipeline"),Z5.forEach(n),Yu.forEach(n),gh=p(t),se=o(t,"DIV",{class:!0});var He=s(se);h(As.$$.fragment,He),a2=p(He),Cs=o(He,"P",{});var Xu=s(Cs);i2=i(Xu,"Language generation pipeline using any "),Nd=o(Xu,"CODE",{});var Y5=s(Nd);l2=i(Y5,"ModelWithLMHead"),Y5.forEach(n),p2=i(Xu,`. 
This pipeline predicts the words that will follow a specified text prompt.`),Xu.forEach(n),c2=p(He),zt=o(He,"P",{});var hl=s(zt);d2=i(hl,"This language generation pipeline can currently be loaded from "),ji=o(hl,"A",{href:!0});var X5=s(ji);m2=i(X5,"pipeline()"),X5.forEach(n),f2=i(hl,` using the following task identifier: `),Od=o(hl,"CODE",{});var K5=s(Od);h2=i(K5,'"text-generation"'),K5.forEach(n),u2=i(hl,"."),hl.forEach(n),g2=p(He),zs=o(He,"P",{});var Ku=s(zs);_2=i(Ku,`The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective, which includes the uni-directional models in the library (e.g. gpt2). See the list of available models on `),Ds=o(Ku,"A",{href:!0,rel:!0});var J5=s(Ds);v2=i(J5,"huggingface.co/models"),J5.forEach(n),w2=i(Ku,"."),Ku.forEach(n),b2=p(He),Hn=o(He,"DIV",{class:!0});var Ju=s(Hn);h(Is.$$.fragment,Ju),k2=p(Ju),Rd=o(Ju,"P",{});var eq=s(Rd);P2=i(eq,"Complete the prompt(s) given as inputs."),eq.forEach(n),Ju.forEach(n),He.forEach(n),_h=p(t),Dt=o(t,"H3",{class:!0});var eg=s(Dt);Wn=o(eg,"A",{id:!0,class:!0,href:!0});var tq=s(Wn);Hd=o(tq,"SPAN",{});var nq=s(Hd);h(js.$$.fragment,nq),nq.forEach(n),tq.forEach(n),T2=p(eg),Wd=o(eg,"SPAN",{});var rq=s(Wd);y2=i(rq,"Text2TextGenerationPipeline"),rq.forEach(n),eg.forEach(n),vh=p(t),S=o(t,"DIV",{class:!0});var K=s(S);h(Ss.$$.fragment,K),x2=p(K),Qd=o(K,"P",{});var oq=s(Qd);$2=i(oq,"Pipeline for text to text generation using seq2seq models."),oq.forEach(n),E2=p(K),It=o(K,"P",{});var ul=s(It);q2=i(ul,"This Text2TextGenerationPipeline pipeline can currently be loaded from "),Si=o(ul,"A",{href:!0});var sq=s(Si);A2=i(sq,"pipeline()"),sq.forEach(n),C2=i(ul,` using the following task identifier: `),Vd=o(ul,"CODE",{});var aq=s(Vd);z2=i(aq,'"text2text-generation"'),aq.forEach(n),D2=i(ul,"."),ul.forEach(n),I2=p(K),Ms=o(K,"P",{});var tg=s(Ms);j2=i(tg,`The models that this pipeline can use are models that have been fine-tuned on a translation task. 
See the up-to-date list of available models on `),Fs=o(tg,"A",{href:!0,rel:!0});var iq=s(Fs);S2=i(iq,"huggingface.co/models"),iq.forEach(n),M2=i(tg,"."),tg.forEach(n),F2=p(K),Bd=o(K,"P",{});var lq=s(Bd);L2=i(lq,"Usage:"),lq.forEach(n),U2=p(K),h(Ls.$$.fragment,K),G2=p(K),Qn=o(K,"DIV",{class:!0});var ng=s(Qn);h(Us.$$.fragment,ng),N2=p(ng),Zd=o(ng,"P",{});var pq=s(Zd);O2=i(pq,"Generate the output text(s) using text(s) given as inputs."),pq.forEach(n),ng.forEach(n),R2=p(K),Vn=o(K,"DIV",{class:!0});var rg=s(Vn);h(Gs.$$.fragment,rg),H2=p(rg),Yd=o(rg,"P",{});var cq=s(Yd);W2=i(cq,"Checks whether there might be something wrong with given input with regard to the model."),cq.forEach(n),rg.forEach(n),K.forEach(n),wh=p(t),jt=o(t,"H3",{class:!0});var og=s(jt);Bn=o(og,"A",{id:!0,class:!0,href:!0});var dq=s(Bn);Xd=o(dq,"SPAN",{});var mq=s(Xd);h(Ns.$$.fragment,mq),mq.forEach(n),dq.forEach(n),Q2=p(og),Kd=o(og,"SPAN",{});var fq=s(Kd);V2=i(fq,"TokenClassificationPipeline"),fq.forEach(n),og.forEach(n),bh=p(t),q=o(t,"DIV",{class:!0});var N=s(q);h(Os.$$.fragment,N),B2=p(N),St=o(N,"P",{});var gl=s(St);Z2=i(gl,"Named Entity Recognition pipeline using any "),Jd=o(gl,"CODE",{});var hq=s(Jd);Y2=i(hq,"ModelForTokenClassification"),hq.forEach(n),X2=i(gl,". See the "),Mi=o(gl,"A",{href:!0});var uq=s(Mi);K2=i(uq,`named entity recognition examples`),uq.forEach(n),J2=i(gl," for more information."),gl.forEach(n),e4=p(N),Mt=o(N,"P",{});var _l=s(Mt);t4=i(_l,"This token recognition pipeline can currently be loaded from "),Fi=o(_l,"A",{href:!0});var gq=s(Fi);n4=i(gq,"pipeline()"),gq.forEach(n),r4=i(_l,` using the following task identifier: `),em=o(_l,"CODE",{});var _q=s(em);o4=i(_q,'"ner"'),_q.forEach(n),s4=i(_l," (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous)."),_l.forEach(n),a4=p(N),Rs=o(N,"P",{});var sg=s(Rs);i4=i(sg,`The models that this pipeline can use are models that have been fine-tuned on a token classification task. 
See the up-to-date list of available models on `),Hs=o(sg,"A",{href:!0,rel:!0});var vq=s(Hs);l4=i(vq,"huggingface.co/models"),vq.forEach(n),p4=i(sg,"."),sg.forEach(n),c4=p(N),Zn=o(N,"DIV",{class:!0});var ag=s(Zn);h(Ws.$$.fragment,ag),d4=p(ag),tm=o(ag,"P",{});var wq=s(tm);m4=i(wq,"Classify each token of the text(s) given as inputs."),wq.forEach(n),ag.forEach(n),f4=p(N),je=o(N,"DIV",{class:!0});var vl=s(je);h(Qs.$$.fragment,vl),h4=p(vl),nm=o(vl,"P",{});var bq=s(nm);u4=i(bq,"Override tokens from a given word that disagree to force agreement on word boundaries."),bq.forEach(n),g4=p(vl),rm=o(vl,"P",{});var kq=s(rm);_4=i(kq,`Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT`),kq.forEach(n),vl.forEach(n),v4=p(N),Yn=o(N,"DIV",{class:!0});var ig=s(Yn);h(Vs.$$.fragment,ig),w4=p(ig),om=o(ig,"P",{});var Pq=s(om);b4=i(Pq,"Fuse various numpy arrays into dicts with all the information needed for aggregation"),Pq.forEach(n),ig.forEach(n),k4=p(N),Xn=o(N,"DIV",{class:!0});var lg=s(Xn);h(Bs.$$.fragment,lg),P4=p(lg),sm=o(lg,"P",{});var Tq=s(sm);T4=i(Tq,"Find and group together the adjacent tokens with the same entity predicted."),Tq.forEach(n),lg.forEach(n),y4=p(N),Kn=o(N,"DIV",{class:!0});var pg=s(Kn);h(Zs.$$.fragment,pg),x4=p(pg),am=o(pg,"P",{});var yq=s(am);$4=i(yq,"Group together the adjacent tokens with the same entity predicted."),yq.forEach(n),pg.forEach(n),N.forEach(n),kh=p(t),Ft=o(t,"H3",{class:!0});var cg=s(Ft);Jn=o(cg,"A",{id:!0,class:!0,href:!0});var xq=s(Jn);im=o(xq,"SPAN",{});var $q=s(im);h(Ys.$$.fragment,$q),$q.forEach(n),xq.forEach(n),E4=p(cg),lm=o(cg,"SPAN",{});var Eq=s(lm);q4=i(Eq,"TranslationPipeline"),Eq.forEach(n),cg.forEach(n),Ph=p(t),U=o(t,"DIV",{class:!0});var me=s(U);h(Xs.$$.fragment,me),A4=p(me),pm=o(me,"P",{});var qq=s(pm);C4=i(qq,"Translates from one language to another."),qq.forEach(n),z4=p(me),Lt=o(me,"P",{});var wl=s(Lt);D4=i(wl,"This translation pipeline can currently be 
loaded from "),Li=o(wl,"A",{href:!0});var Aq=s(Li);I4=i(Aq,"pipeline()"),Aq.forEach(n),j4=i(wl,` using the following task identifier: `),cm=o(wl,"CODE",{});var Cq=s(cm);S4=i(Cq,'"translation_xx_to_yy"'),Cq.forEach(n),M4=i(wl,"."),wl.forEach(n),F4=p(me),Ks=o(me,"P",{});var dg=s(Ks);L4=i(dg,`The models that this pipeline can use are models that have been fine-tuned on a translation task. See the up-to-date list of available models on `),Js=o(dg,"A",{href:!0,rel:!0});var zq=s(Js);U4=i(zq,"huggingface.co/models"),zq.forEach(n),G4=i(dg,"."),dg.forEach(n),N4=p(me),dm=o(me,"P",{});var Dq=s(dm);O4=i(Dq,"Usage:"),Dq.forEach(n),R4=p(me),h(ea.$$.fragment,me),H4=p(me),er=o(me,"DIV",{class:!0});var mg=s(er);h(ta.$$.fragment,mg),W4=p(mg),mm=o(mg,"P",{});var Iq=s(mm);Q4=i(Iq,"Translate the text(s) given as inputs."),Iq.forEach(n),mg.forEach(n),me.forEach(n),Th=p(t),Ut=o(t,"H3",{class:!0});var fg=s(Ut);tr=o(fg,"A",{id:!0,class:!0,href:!0});var jq=s(tr);fm=o(jq,"SPAN",{});var Sq=s(fm);h(na.$$.fragment,Sq),Sq.forEach(n),jq.forEach(n),V4=p(fg),hm=o(fg,"SPAN",{});var Mq=s(hm);B4=i(Mq,"ZeroShotClassificationPipeline"),Mq.forEach(n),fg.forEach(n),yh=p(t),V=o(t,"DIV",{class:!0});var Pe=s(V);h(ra.$$.fragment,Pe),Z4=p(Pe),oa=o(Pe,"P",{});var hg=s(oa);Y4=i(hg,"NLI-based zero-shot classification pipeline using a "),um=o(hg,"CODE",{});var Fq=s(um);X4=i(Fq,"ModelForSequenceClassification"),Fq.forEach(n),K4=i(hg,` trained on NLI (natural language inference) tasks.`),hg.forEach(n),J4=p(Pe),Ee=o(Pe,"P",{});var gr=s(Ee);e0=i(gr,`Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis pair and passed to the pretrained model. Then, the logit for `),gm=o(gr,"EM",{});var Lq=s(gm);t0=i(Lq,"entailment"),Lq.forEach(n),n0=i(gr,` is taken as the logit for the candidate label being valid. 
Any NLI model can be used, but the id of the `),_m=o(gr,"EM",{});var Uq=s(_m);r0=i(Uq,"entailment"),Uq.forEach(n),o0=i(gr,` label must be included in the model config\u2019s :attr:`),vm=o(gr,"EM",{});var Gq=s(vm);s0=i(Gq,"~transformers.PretrainedConfig.label2id"),Gq.forEach(n),a0=i(gr,"."),gr.forEach(n),i0=p(Pe),Gt=o(Pe,"P",{});var bl=s(Gt);l0=i(bl,"This NLI pipeline can currently be loaded from "),Ui=o(bl,"A",{href:!0});var Nq=s(Ui);p0=i(Nq,"pipeline()"),Nq.forEach(n),c0=i(bl,` using the following task identifier: `),wm=o(bl,"CODE",{});var Oq=s(wm);d0=i(Oq,'"zero-shot-classification"'),Oq.forEach(n),m0=i(bl,"."),bl.forEach(n),f0=p(Pe),sa=o(Pe,"P",{});var ug=s(sa);h0=i(ug,`The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list of available models on `),aa=o(ug,"A",{href:!0,rel:!0});var Rq=s(aa);u0=i(Rq,"huggingface.co/models"),Rq.forEach(n),g0=i(ug,"."),ug.forEach(n),_0=p(Pe),nr=o(Pe,"DIV",{class:!0});var gg=s(nr);h(ia.$$.fragment,gg),v0=p(gg),la=o(gg,"P",{});var _g=s(la);w0=i(_g,"Classify the sequence(s) given as inputs. See the "),Gi=o(_g,"A",{href:!0});var Hq=s(Gi);b0=i(Hq,"ZeroShotClassificationPipeline"),Hq.forEach(n),k0=i(_g,` documentation for more information.`),_g.forEach(n),gg.forEach(n),Pe.forEach(n),xh=p(t),Nt=o(t,"H3",{class:!0});var vg=s(Nt);rr=o(vg,"A",{id:!0,class:!0,href:!0});var Wq=s(rr);bm=o(Wq,"SPAN",{});var Qq=s(bm);h(pa.$$.fragment,Qq),Qq.forEach(n),Wq.forEach(n),P0=p(vg),km=o(vg,"SPAN",{});var Vq=s(km);T0=i(Vq,"ZeroShotImageClassificationPipeline"),Vq.forEach(n),vg.forEach(n),$h=p(t),ae=o(t,"DIV",{class:!0});var We=s(ae);h(ca.$$.fragment,We),y0=p(We),Ot=o(We,"P",{});var kl=s(Ot);x0=i(kl,"Zero shot image classification pipeline using "),Pm=o(kl,"CODE",{});var Bq=s(Pm);$0=i(Bq,"CLIPModel"),Bq.forEach(n),E0=i(kl,`. 
This pipeline predicts the class of an image when you provide an image and a set of `),Tm=o(kl,"CODE",{});var Zq=s(Tm);q0=i(Zq,"candidate_labels"),Zq.forEach(n),A0=i(kl,"."),kl.forEach(n),C0=p(We),Rt=o(We,"P",{});var Pl=s(Rt);z0=i(Pl,"This image classification pipeline can currently be loaded from "),Ni=o(Pl,"A",{href:!0});var Yq=s(Ni);D0=i(Yq,"pipeline()"),Yq.forEach(n),I0=i(Pl,` using the following task identifier: `),ym=o(Pl,"CODE",{});var Xq=s(ym);j0=i(Xq,'"zero-shot-image-classification"'),Xq.forEach(n),S0=i(Pl,"."),Pl.forEach(n),M0=p(We),da=o(We,"P",{});var wg=s(da);F0=i(wg,`See the list of available models on `),ma=o(wg,"A",{href:!0,rel:!0});var Kq=s(ma);L0=i(Kq,"huggingface.co/models"),Kq.forEach(n),U0=i(wg,"."),wg.forEach(n),G0=p(We),or=o(We,"DIV",{class:!0});var bg=s(or);h(fa.$$.fragment,bg),N0=p(bg),xm=o(bg,"P",{});var Jq=s(xm);O0=i(Jq,"Assign labels to the image(s) passed as inputs."),Jq.forEach(n),bg.forEach(n),We.forEach(n),Eh=p(t),Ht=o(t,"H2",{class:!0});var kg=s(Ht);sr=o(kg,"A",{id:!0,class:!0,href:!0});var eA=s(sr);$m=o(eA,"SPAN",{});var tA=s($m);h(ha.$$.fragment,tA),tA.forEach(n),eA.forEach(n),R0=p(kg),Oi=o(kg,"SPAN",{});var Vx=s(Oi);H0=i(Vx,"Parent class: "),Em=o(Vx,"CODE",{});var nA=s(Em);W0=i(nA,"Pipeline"),nA.forEach(n),Vx.forEach(n),kg.forEach(n),qh=p(t),P=o(t,"DIV",{class:!0});var T=s(P);h(ua.$$.fragment,T),Q0=p(T),qm=o(T,"P",{});var rA=s(qm);V0=i(rA,`The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across different pipelines.`),rA.forEach(n),B0=p(T),Am=o(T,"P",{});var oA=s(Am);Z0=i(oA,`Base class implementing pipelined operations. 
Pipeline workflow is defined as a sequence of the following operations:`),oA.forEach(n),Y0=p(T),Cm=o(T,"P",{});var sA=s(Cm);X0=i(sA,"Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output"),sA.forEach(n),K0=p(T),zm=o(T,"P",{});var aA=s(zm);J0=i(aA,"Pipeline supports running on CPU or GPU through the device argument (see below)."),aA.forEach(n),ex=p(T),_e=o(T,"P",{});var Qe=s(_e);tx=i(Qe,"Some pipeline, like for instance "),Ri=o(Qe,"A",{href:!0});var iA=s(Ri);nx=i(iA,"FeatureExtractionPipeline"),iA.forEach(n),rx=i(Qe," ("),Dm=o(Qe,"CODE",{});var lA=s(Dm);ox=i(lA,"'feature-extraction'"),lA.forEach(n),sx=i(Qe,`) output large tensor object as nested-lists. In order to avoid dumping such large structure as textual data we provide the `),Im=o(Qe,"CODE",{});var pA=s(Im);ax=i(pA,"binary_output"),pA.forEach(n),ix=i(Qe,` constructor argument. If set to `),jm=o(Qe,"CODE",{});var cA=s(jm);lx=i(cA,"True"),cA.forEach(n),px=i(Qe,", the output will be stored in the pickle format."),Qe.forEach(n),cx=p(T),ar=o(T,"DIV",{class:!0});var Pg=s(ar);h(ga.$$.fragment,Pg),dx=p(Pg),Sm=o(Pg,"P",{});var dA=s(Sm);mx=i(dA,"Check if the model class is in supported by the pipeline."),dA.forEach(n),Pg.forEach(n),fx=p(T),ve=o(T,"DIV",{class:!0});var _r=s(ve);h(_a.$$.fragment,_r),hx=p(_r),Mm=o(_r,"P",{});var mA=s(Mm);ux=i(mA,"Context Manager allowing tensor allocation on the user-specified device in framework agnostic way."),mA.forEach(n),gx=p(_r),Fm=o(_r,"P",{});var fA=s(Fm);_x=i(fA,"Examples:"),fA.forEach(n),vx=p(_r),h(va.$$.fragment,_r),_r.forEach(n),wx=p(T),ir=o(T,"DIV",{class:!0});var Tg=s(ir);h(wa.$$.fragment,Tg),bx=p(Tg),Lm=o(Tg,"P",{});var hA=s(Lm);kx=i(hA,"Ensure PyTorch tensors are on the specified device."),hA.forEach(n),Tg.forEach(n),Px=p(T),lr=o(T,"DIV",{class:!0});var yg=s(lr);h(ba.$$.fragment,yg),Tx=p(yg),ka=o(yg,"P",{});var xg=s(ka);yx=i(xg,"Postprocess will receive the raw outputs of the "),Um=o(xg,"CODE",{});var 
uA=s(Um);xx=i(uA,"_forward"),uA.forEach(n),$x=i(xg,` method, generally tensors, and reformat them into something more friendly. Generally it will output a list or a dict or results (containing just strings and numbers).`),xg.forEach(n),yg.forEach(n),Ex=p(T),pr=o(T,"DIV",{class:!0});var $g=s(pr);h(Pa.$$.fragment,$g),qx=p($g),Ta=o($g,"P",{});var Eg=s(Ta);Ax=i(Eg,"Scikit / Keras interface to transformers\u2019 pipelines. This method will forward to "),Gm=o(Eg,"STRONG",{});var gA=s(Gm);Cx=i(gA,"call"),gA.forEach(n),zx=i(Eg,"()."),Eg.forEach(n),$g.forEach(n),Dx=p(T),cr=o(T,"DIV",{class:!0});var qg=s(cr);h(ya.$$.fragment,qg),Ix=p(qg),Wt=o(qg,"P",{});var Tl=s(Wt);jx=i(Tl,"Preprocess will take the "),Nm=o(Tl,"CODE",{});var _A=s(Nm);Sx=i(_A,"input_"),_A.forEach(n),Mx=i(Tl,` of a specific pipeline and return a dictionnary of everything necessary for `),Om=o(Tl,"CODE",{});var vA=s(Om);Fx=i(vA,"_forward"),vA.forEach(n),Lx=i(Tl," to run properly. It should contain at least one tensor, but might have arbitrary other items."),Tl.forEach(n),qg.forEach(n),Ux=p(T),dr=o(T,"DIV",{class:!0});var Ag=s(dr);h(xa.$$.fragment,Ag),Gx=p(Ag),Rm=o(Ag,"P",{});var wA=s(Rm);Nx=i(wA,"Save the pipeline\u2019s model and tokenizer."),wA.forEach(n),Ag.forEach(n),Ox=p(T),mr=o(T,"DIV",{class:!0});var Cg=s(mr);h($a.$$.fragment,Cg),Rx=p(Cg),Ea=o(Cg,"P",{});var zg=s(Ea);Hx=i(zg,"Scikit / Keras interface to transformers\u2019 pipelines. 
This method will forward to "),Hm=o(zg,"STRONG",{});var bA=s(Hm);Wx=i(bA,"call"),bA.forEach(n),Qx=i(zg,"()."),zg.forEach(n),Cg.forEach(n),T.forEach(n),this.h()},h(){c(x,"name","hf:doc:metadata"),c(x,"content",JSON.stringify(AA)),c(A,"id","pipelines"),c(A,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(A,"href","#pipelines"),c($,"class","relative group"),c(Ca,"href","../task_summary"),c(Da,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(Ia,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AudioClassificationPipeline"),c(ja,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline"),c(Sa,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ConversationalPipeline"),c(Ma,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.FeatureExtractionPipeline"),c(Fa,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.FillMaskPipeline"),c(La,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ImageClassificationPipeline"),c(Ua,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ImageSegmentationPipeline"),c(Ga,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ObjectDetectionPipeline"),c(Na,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.QuestionAnsweringPipeline"),c(Oa,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.SummarizationPipeline"),c(Ra,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TableQuestionAnsweringPipeline"),c(Ha,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TextClassificationPipeline"),c(Wa,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TextGenerationPi
peline"),c(Qa,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Text2TextGenerationPipeline"),c(Va,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TokenClassificationPipeline"),c(Ba,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TranslationPipeline"),c(Za,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline"),c(Ya,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ZeroShotImageClassificationPipeline"),c(Bt,"id","transformers.pipeline"),c(Bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bt,"href","#transformers.pipeline"),c(Ve,"class","relative group"),c(Tr,"href","https://huggingface.co"),c(Tr,"rel","nofollow"),c(Ja,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Pipeline"),c(ei,"href","tokenizer"),c(ti,"href","model"),c(R,"class","docstring"),c(Jt,"id","pipeline-batching"),c(Jt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jt,"href","#pipeline-batching"),c(Ze,"class","relative group"),c(nn,"id","pipeline-chunk-batching"),c(nn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(nn,"href","#pipeline-chunk-batching"),c(Xe,"class","relative group"),c(on,"id","pipeline-custom-code"),c(on,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(on,"href","#pipeline-custom-code"),c(Ke,"class","relative group"),c(an,"id","implementing-a-pipeline"),c(an,"class","header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(an,"href","#implementing-a-pipeline"),c(Je,"class","relative group"),c(mi,"href","../add_new_pipeline"),c(ln,"id","the-task-specific-pipelines"),c(ln,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ln,"href","#the-task-specific-pipelines"),c(et,"class","relative group"),c(pn,"id","transformers.AudioClassificationPipeline"),c(pn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(pn,"href","#transformers.AudioClassificationPipeline"),c(tt,"class","relative group"),c(fi,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(Xr,"href","https://huggingface.co/models?filter=audio-classification"),c(Xr,"rel","nofollow"),c(hi,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline"),c(cn,"class","docstring"),c(J,"class","docstring"),c(dn,"id","transformers.AutomaticSpeechRecognitionPipeline"),c(dn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(dn,"href","#transformers.AutomaticSpeechRecognitionPipeline"),c(rt,"class","relative group"),c(ui,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline"),c(mn,"class","docstring"),c(he,"class","docstring"),c(fn,"id","transformers.Conversation"),c(fn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(fn,"href","#transformers.Conversation"),c(ot,"class","relative group"),c(gi,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ConversationalPipeline"),c(_i,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ConversationalPipeline"),c(hn,"class","docstring"),c(un,"class","docstring"),c(Ce,"class","docstring"),c(gn,"class","docstring"),c(D,"class","docstring"),c(vi,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(ho,"href","https://huggingface.co/models?filter=conversational"),c(ho,"rel","nofollow"),c(_n,"class","docstring"),c(M,"class","docstring"),c(vn,"id","transformers.FeatureExtractionPipeline"),c(vn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(vn,"href","#transformers.FeatureExtractionPipeline"),c(at,"class","relative group"),c(wi,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(bo,"href","https://huggingface.co/models"),c(bo,"rel","nofollow"),c(wn,"class","docstring"),c(ee,"class","docstring"),c(bn,"id","transformers.FillMaskPipeline"),c(bn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(bn,"href","#transformers.FillMaskPipeline"),c(lt,"class","relative group"),c(bi,"href","../task_summary#masked-language-modeling"),c(ki,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(xo,"href","https://huggingface.co/models?filter=fill-mask"),c(xo,"rel","nofollow"),c(Pn,"class","docstring"),c(H,"class","docstring"),c(Tn,"id","transformers.ImageClassificationPipeline"),c(Tn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(Tn,"href","#transformers.ImageClassificationPipeline"),c(dt,"class","relative group"),c(Pi,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(zo,"href","https://huggingface.co/models?filter=image-classification"),c(zo,"rel","nofollow"),c(yn,"class","docstring"),c(te,"class","docstring"),c(xn,"id","transformers.ImageSegmentationPipeline"),c(xn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(xn,"href","#transformers.ImageSegmentationPipeline"),c(ft,"class","relative group"),c(Ti,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(Fo,"href","https://huggingface.co/models?filter=image-segmentation"),c(Fo,"rel","nofollow"),c($n,"class","docstring"),c(ne,"class","docstring"),c(En,"id","transformers.TokenClassificationPipeline"),c(En,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(En,"href","#transformers.TokenClassificationPipeline"),c(ut,"class","relative group"),c(yi,"href","../task_summary#named-entity-recognition"),c(xi,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(Oo,"href","https://huggingface.co/models?filter=token-classification"),c(Oo,"rel","nofollow"),c(ze,"class","docstring"),c(qn,"class","docstring"),c(An,"class","docstring"),c(Cn,"class","docstring"),c(I,"class","docstring"),c($i,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TokenClassificationPipeline"),c(Dn,"id","transformers.ObjectDetectionPipeline"),c(Dn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(Dn,"href","#transformers.ObjectDetectionPipeline"),c(vt,"class","relative group"),c(Ei,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(Xo,"href","https://huggingface.co/models?filter=object-detection"),c(Xo,"rel","nofollow"),c(In,"class","docstring"),c(re,"class","docstring"),c(jn,"id","transformers.QuestionAnsweringPipeline"),c(jn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(jn,"href","#transformers.QuestionAnsweringPipeline"),c(bt,"class","relative group"),c(qi,"href","../task_summary#question-answering"),c(Ai,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(ns,"href","https://huggingface.co/models?filter=question-answering"),c(ns,"rel","nofollow"),c(Sn,"class","docstring"),c(De,"class","docstring"),c(Ie,"class","docstring"),c(Fn,"class","docstring"),c(j,"class","docstring"),c(Ln,"id","transformers.SummarizationPipeline"),c(Ln,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Ln,"href","#transformers.SummarizationPipeline"),c(Tt,"class","relative group"),c(Ci,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(cs,"href","https://huggingface.co/models?filter=summarization"),c(cs,"rel","nofollow"),c(Un,"class","docstring"),c(F,"class","docstring"),c(Gn,"id","transformers.TableQuestionAnsweringPipeline"),c(Gn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Gn,"href","#transformers.TableQuestionAnsweringPipeline"),c(xt,"class","relative 
group"),c(zi,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(_s,"href","https://huggingface.co/models?filter=table-question-answering"),c(_s,"rel","nofollow"),c(C,"class","docstring"),c(oe,"class","docstring"),c(Nn,"id","transformers.TextClassificationPipeline"),c(Nn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Nn,"href","#transformers.TextClassificationPipeline"),c(Et,"class","relative group"),c(Di,"href","../task_summary#sequence-classification"),c(Ii,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c($s,"href","https://huggingface.co/models?filter=text-classification"),c($s,"rel","nofollow"),c(On,"class","docstring"),c(Q,"class","docstring"),c(Rn,"id","transformers.TextGenerationPipeline"),c(Rn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Rn,"href","#transformers.TextGenerationPipeline"),c(Ct,"class","relative group"),c(ji,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(Ds,"href","https://huggingface.co/models?filter=text-generation"),c(Ds,"rel","nofollow"),c(Hn,"class","docstring"),c(se,"class","docstring"),c(Wn,"id","transformers.Text2TextGenerationPipeline"),c(Wn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Wn,"href","#transformers.Text2TextGenerationPipeline"),c(Dt,"class","relative 
group"),c(Si,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(Fs,"href","https://huggingface.co/models?filter=text2text-generation"),c(Fs,"rel","nofollow"),c(Qn,"class","docstring"),c(Vn,"class","docstring"),c(S,"class","docstring"),c(Bn,"id","transformers.TokenClassificationPipeline"),c(Bn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Bn,"href","#transformers.TokenClassificationPipeline"),c(jt,"class","relative group"),c(Mi,"href","../task_summary#named-entity-recognition"),c(Fi,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(Hs,"href","https://huggingface.co/models?filter=token-classification"),c(Hs,"rel","nofollow"),c(Zn,"class","docstring"),c(je,"class","docstring"),c(Yn,"class","docstring"),c(Xn,"class","docstring"),c(Kn,"class","docstring"),c(q,"class","docstring"),c(Jn,"id","transformers.TranslationPipeline"),c(Jn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Jn,"href","#transformers.TranslationPipeline"),c(Ft,"class","relative group"),c(Li,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(Js,"href","https://huggingface.co/models?filter=translation"),c(Js,"rel","nofollow"),c(er,"class","docstring"),c(U,"class","docstring"),c(tr,"id","transformers.ZeroShotClassificationPipeline"),c(tr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(tr,"href","#transformers.ZeroShotClassificationPipeline"),c(Ut,"class","relative 
group"),c(Ui,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(aa,"href","https://huggingface.co/models?search=nli"),c(aa,"rel","nofollow"),c(Gi,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline"),c(nr,"class","docstring"),c(V,"class","docstring"),c(rr,"id","transformers.ZeroShotImageClassificationPipeline"),c(rr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(rr,"href","#transformers.ZeroShotImageClassificationPipeline"),c(Nt,"class","relative group"),c(Ni,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline"),c(ma,"href","https://huggingface.co/models?filter=zero-shot-image-classification"),c(ma,"rel","nofollow"),c(or,"class","docstring"),c(ae,"class","docstring"),c(sr,"id","transformers.Pipeline"),c(sr,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(sr,"href","#transformers.Pipeline"),c(Ht,"class","relative 
group"),c(Ri,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.FeatureExtractionPipeline"),c(ar,"class","docstring"),c(ve,"class","docstring"),c(ir,"class","docstring"),c(lr,"class","docstring"),c(pr,"class","docstring"),c(cr,"class","docstring"),c(dr,"class","docstring"),c(mr,"class","docstring"),c(P,"class","docstring")},m(t,d){e(document.head,x),m(t,ie,d),m(t,$,d),e($,A),e(A,fe),u(E,fe,null),e($,O),e($,Te),e(Te,Dg),m(t,Vm,d),m(t,Qt,d),e(Qt,Ig),e(Qt,Ca),e(Ca,jg),e(Qt,Sg),m(t,Bm,d),m(t,za,d),e(za,Mg),m(t,Zm,d),m(t,Vt,d),e(Vt,yl),e(yl,wr),e(wr,Fg),e(wr,Da),e(Da,Lg),e(wr,Ug),e(Vt,Gg),e(Vt,br),e(br,xl),e(xl,Ng),e(br,Og),e(br,b),e(b,$l),e($l,Ia),e(Ia,Rg),e(b,Hg),e(b,El),e(El,ja),e(ja,Wg),e(b,Qg),e(b,ql),e(ql,Sa),e(Sa,Vg),e(b,Bg),e(b,Al),e(Al,Ma),e(Ma,Zg),e(b,Yg),e(b,Cl),e(Cl,Fa),e(Fa,Xg),e(b,Kg),e(b,zl),e(zl,La),e(La,Jg),e(b,e_),e(b,Dl),e(Dl,Ua),e(Ua,t_),e(b,n_),e(b,Il),e(Il,Ga),e(Ga,r_),e(b,o_),e(b,jl),e(jl,Na),e(Na,s_),e(b,a_),e(b,Sl),e(Sl,Oa),e(Oa,i_),e(b,l_),e(b,Ml),e(Ml,Ra),e(Ra,p_),e(b,c_),e(b,Fl),e(Fl,Ha),e(Ha,d_),e(b,m_),e(b,Ll),e(Ll,Wa),e(Wa,f_),e(b,h_),e(b,Ul),e(Ul,Qa),e(Qa,u_),e(b,g_),e(b,Gl),e(Gl,Va),e(Va,__),e(b,v_),e(b,Nl),e(Nl,Ba),e(Ba,w_),e(b,b_),e(b,Ol),e(Ol,Za),e(Za,k_),e(b,P_),e(b,Rl),e(Rl,Ya),e(Ya,T_),m(t,Ym,d),m(t,Ve,d),e(Ve,Bt),e(Bt,Hl),u(kr,Hl,null),e(Ve,y_),e(Ve,Wl),e(Wl,x_),m(t,Xm,d),m(t,Zt,d),e(Zt,$_),e(Zt,Ql),e(Ql,E_),e(Zt,q_),m(t,Km,d),m(t,Xa,d),e(Xa,A_),m(t,Jm,d),u(Pr,t,d),m(t,ef,d),m(t,Yt,d),e(Yt,C_),e(Yt,Tr),e(Tr,z_),e(Yt,D_),m(t,tf,d),u(yr,t,d),m(t,nf,d),m(t,Xt,d),e(Xt,I_),e(Xt,Vl),e(Vl,j_),e(Xt,S_),m(t,rf,d),u(xr,t,d),m(t,of,d),m(t,Kt,d),e(Kt,M_),e(Kt,Bl),e(Bl,F_),e(Kt,L_),m(t,sf,d),u($r,t,d),m(t,af,d),m(t,Ka,d),e(Ka,U_),m(t,lf,d),u(Er,t,d),m(t,pf,d),m(t,R,d),u(qr,R,null),e(R,G_),e(R,Ar),e(Ar,N_),e(Ar,Ja),e(Ja,O_),e(Ar,R_),e(R,H_),e(R,Zl),e(Zl,W_),e(R,Q_),e(R,Be),e(Be,Cr),e(Cr,V_),e(Cr,ei),e(ei,B_),e(Cr,Z_),e(Be,Y_),e(Be,zr),e(zr,X_),e(zr,ti),e(ti,K_),e(zr,J_),e(Be,e1),e(Be,Yl),e(Yl,t1),e(R,n1),e(R,Xl),e(Xl,r1),
e(R,o1),u(Dr,R,null),m(t,cf,d),m(t,Ze,d),e(Ze,Jt),e(Jt,Kl),u(Ir,Kl,null),e(Ze,s1),e(Ze,Jl),e(Jl,a1),m(t,df,d),m(t,qe,d),e(qe,i1),e(qe,ep),e(ep,l1),e(qe,p1),e(qe,tp),e(tp,c1),e(qe,d1),m(t,mf,d),u(jr,t,d),m(t,ff,d),u(en,t,d),m(t,hf,d),u(Sr,t,d),m(t,uf,d),u(Mr,t,d),m(t,gf,d),m(t,ni,d),e(ni,m1),m(t,_f,d),u(Fr,t,d),m(t,vf,d),m(t,tn,d),e(tn,f1),e(tn,np),e(np,h1),e(tn,u1),m(t,wf,d),u(Lr,t,d),m(t,bf,d),m(t,ri,d),e(ri,g1),m(t,kf,d),m(t,oi,d),e(oi,_1),m(t,Pf,d),m(t,le,d),e(le,rp),e(rp,op),e(op,sp),e(sp,v1),e(le,w1),e(le,ap),e(ap,ip),e(ip,b1),e(le,k1),e(le,lp),e(lp,pp),e(pp,P1),e(le,T1),e(le,Ur),e(Ur,cp),e(cp,y1),e(Ur,x1),e(Ur,Ye),e(Ye,dp),e(dp,$1),e(Ye,E1),e(Ye,mp),e(mp,q1),e(Ye,A1),e(Ye,fp),e(fp,C1),e(le,z1),e(le,hp),e(hp,up),e(up,D1),m(t,Tf,d),m(t,Xe,d),e(Xe,nn),e(nn,gp),u(Gr,gp,null),e(Xe,I1),e(Xe,_p),e(_p,j1),m(t,yf,d),m(t,ye,d),e(ye,vp),e(vp,S1),e(ye,M1),e(ye,wp),e(wp,F1),e(ye,L1),e(ye,bp),e(bp,U1),e(ye,G1),m(t,xf,d),m(t,Ae,d),e(Ae,N1),e(Ae,kp),e(kp,O1),e(Ae,R1),e(Ae,Pp),e(Pp,H1),e(Ae,W1),m(t,$f,d),u(Nr,t,d),m(t,Ef,d),m(t,si,d),e(si,Q1),m(t,qf,d),u(Or,t,d),m(t,Af,d),m(t,ai,d),e(ai,V1),m(t,Cf,d),m(t,rn,d),e(rn,B1),e(rn,Tp),e(Tp,Z1),e(rn,Y1),m(t,zf,d),m(t,Ke,d),e(Ke,on),e(on,yp),u(Rr,yp,null),e(Ke,X1),e(Ke,xp),e(xp,K1),m(t,Df,d),m(t,ii,d),e(ii,J1),m(t,If,d),m(t,sn,d),e(sn,ev),e(sn,$p),e($p,tv),e(sn,nv),m(t,jf,d),m(t,li,d),e(li,rv),m(t,Sf,d),m(t,pi,d),e(pi,Ep),e(Ep,ov),m(t,Mf,d),u(Hr,t,d),m(t,Ff,d),m(t,ci,d),e(ci,sv),m(t,Lf,d),m(t,Je,d),e(Je,an),e(an,qp),u(Wr,qp,null),e(Je,av),e(Je,Ap),e(Ap,iv),m(t,Uf,d),m(t,di,d),e(di,mi),e(mi,lv),m(t,Gf,d),m(t,et,d),e(et,ln),e(ln,Cp),u(Qr,Cp,null),e(et,pv),e(et,zp),e(zp,cv),m(t,Nf,d),m(t,tt,d),e(tt,pn),e(pn,Dp),u(Vr,Dp,null),e(tt,dv),e(tt,Ip),e(Ip,mv),m(t,Of,d),m(t,J,d),u(Br,J,null),e(J,fv),e(J,Zr),e(Zr,hv),e(Zr,jp),e(jp,uv),e(Zr,gv),e(J,_v),e(J,nt),e(nt,vv),e(nt,fi),e(fi,wv),e(nt,bv),e(nt,Sp),e(Sp,kv),e(nt,Pv),e(J,Tv),e(J,Yr),e(Yr,yv),e(Yr,Xr),e(Xr,xv),e(Yr,$v),e(J,Ev),e(J,cn),u(Kr,cn,null),e(cn,qv),e(cn,Jr),e(Jr,Av),e(Jr,hi),e(hi,Cv),e(
Jr,zv),m(t,Rf,d),m(t,rt,d),e(rt,dn),e(dn,Mp),u(eo,Mp,null),e(rt,Dv),e(rt,Fp),e(Fp,Iv),m(t,Hf,d),m(t,he,d),u(to,he,null),e(he,jv),e(he,Lp),e(Lp,Sv),e(he,Mv),e(he,Up),e(Up,Fv),e(he,Lv),e(he,mn),u(no,mn,null),e(mn,Uv),e(mn,ro),e(ro,Gv),e(ro,ui),e(ui,Nv),e(ro,Ov),m(t,Wf,d),m(t,ot,d),e(ot,fn),e(fn,Gp),u(oo,Gp,null),e(ot,Rv),e(ot,Np),e(Np,Hv),m(t,Qf,d),m(t,D,d),u(so,D,null),e(D,Wv),e(D,xe),e(xe,Qv),e(xe,gi),e(gi,Vv),e(xe,Bv),e(xe,_i),e(_i,Zv),e(xe,Yv),e(xe,Op),e(Op,Xv),e(xe,Kv),e(D,Jv),e(D,Rp),e(Rp,ew),e(D,tw),u(ao,D,null),e(D,nw),e(D,hn),u(io,hn,null),e(hn,rw),e(hn,lo),e(lo,ow),e(lo,Hp),e(Hp,sw),e(lo,aw),e(D,iw),e(D,un),u(po,un,null),e(un,lw),e(un,Wp),e(Wp,pw),e(D,cw),e(D,Ce),u(co,Ce,null),e(Ce,dw),e(Ce,Qp),e(Qp,mw),e(Ce,fw),e(Ce,ue),e(ue,hw),e(ue,Vp),e(Vp,uw),e(ue,gw),e(ue,Bp),e(Bp,_w),e(ue,vw),e(ue,Zp),e(Zp,ww),e(ue,bw),e(ue,Yp),e(Yp,kw),e(ue,Pw),e(D,Tw),e(D,gn),u(mo,gn,null),e(gn,yw),e(gn,$e),e($e,xw),e($e,Xp),e(Xp,$w),e($e,Ew),e($e,Kp),e(Kp,qw),e($e,Aw),e($e,Jp),e(Jp,Cw),e($e,zw),m(t,Vf,d),m(t,M,d),u(fo,M,null),e(M,Dw),e(M,ec),e(ec,Iw),e(M,jw),e(M,st),e(st,Sw),e(st,vi),e(vi,Mw),e(st,Fw),e(st,tc),e(tc,Lw),e(st,Uw),e(M,Gw),e(M,ge),e(ge,Nw),e(ge,nc),e(nc,Ow),e(ge,Rw),e(ge,rc),e(rc,Hw),e(ge,Ww),e(ge,oc),e(oc,Qw),e(ge,Vw),e(ge,ho),e(ho,Bw),e(ge,Zw),e(M,Yw),e(M,sc),e(sc,Xw),e(M,Kw),u(uo,M,null),e(M,Jw),e(M,_n),u(go,_n,null),e(_n,eb),e(_n,ac),e(ac,tb),m(t,Bf,d),m(t,at,d),e(at,vn),e(vn,ic),u(_o,ic,null),e(at,nb),e(at,lc),e(lc,rb),m(t,Zf,d),m(t,ee,d),u(vo,ee,null),e(ee,ob),e(ee,pc),e(pc,sb),e(ee,ab),e(ee,it),e(it,ib),e(it,wi),e(wi,lb),e(it,pb),e(it,cc),e(cc,cb),e(it,db),e(ee,mb),e(ee,wo),e(wo,fb),e(wo,bo),e(bo,hb),e(wo,ub),e(ee,gb),e(ee,wn),u(ko,wn,null),e(wn,_b),e(wn,dc),e(dc,vb),m(t,Yf,d),m(t,lt,d),e(lt,bn),e(bn,mc),u(Po,mc,null),e(lt,wb),e(lt,fc),e(fc,bb),m(t,Xf,d),m(t,H,d),u(To,H,null),e(H,kb),e(H,pt),e(pt,Pb),e(pt,hc),e(hc,Tb),e(pt,yb),e(pt,bi),e(bi,xb),e(pt,$b),e(H,Eb),e(H,ct),e(ct,qb),e(ct,ki),e(ki,Ab),e(ct,Cb),e(ct,uc),e(uc,zb),e(ct,Db),e(H,Ib),e(H,yo),e(yo,jb),e(yo,x
o),e(xo,Sb),e(yo,Mb),e(H,Fb),u(kn,H,null),e(H,Lb),e(H,Pn),u($o,Pn,null),e(Pn,Ub),e(Pn,gc),e(gc,Gb),m(t,Kf,d),m(t,dt,d),e(dt,Tn),e(Tn,_c),u(Eo,_c,null),e(dt,Nb),e(dt,vc),e(vc,Ob),m(t,Jf,d),m(t,te,d),u(qo,te,null),e(te,Rb),e(te,Ao),e(Ao,Hb),e(Ao,wc),e(wc,Wb),e(Ao,Qb),e(te,Vb),e(te,mt),e(mt,Bb),e(mt,Pi),e(Pi,Zb),e(mt,Yb),e(mt,bc),e(bc,Xb),e(mt,Kb),e(te,Jb),e(te,Co),e(Co,ek),e(Co,zo),e(zo,tk),e(Co,nk),e(te,rk),e(te,yn),u(Do,yn,null),e(yn,ok),e(yn,kc),e(kc,sk),m(t,eh,d),m(t,ft,d),e(ft,xn),e(xn,Pc),u(Io,Pc,null),e(ft,ak),e(ft,Tc),e(Tc,ik),m(t,th,d),m(t,ne,d),u(jo,ne,null),e(ne,lk),e(ne,So),e(So,pk),e(So,yc),e(yc,ck),e(So,dk),e(ne,mk),e(ne,ht),e(ht,fk),e(ht,Ti),e(Ti,hk),e(ht,uk),e(ht,xc),e(xc,gk),e(ht,_k),e(ne,vk),e(ne,Mo),e(Mo,wk),e(Mo,Fo),e(Fo,bk),e(Mo,kk),e(ne,Pk),e(ne,$n),u(Lo,$n,null),e($n,Tk),e($n,$c),e($c,yk),m(t,nh,d),m(t,ut,d),e(ut,En),e(En,Ec),u(Uo,Ec,null),e(ut,xk),e(ut,qc),e(qc,$k),m(t,rh,d),m(t,I,d),u(Go,I,null),e(I,Ek),e(I,gt),e(gt,qk),e(gt,Ac),e(Ac,Ak),e(gt,Ck),e(gt,yi),e(yi,zk),e(gt,Dk),e(I,Ik),e(I,_t),e(_t,jk),e(_t,xi),e(xi,Sk),e(_t,Mk),e(_t,Cc),e(Cc,Fk),e(_t,Lk),e(I,Uk),e(I,No),e(No,Gk),e(No,Oo),e(Oo,Nk),e(No,Ok),e(I,Rk),e(I,ze),u(Ro,ze,null),e(ze,Hk),e(ze,zc),e(zc,Wk),e(ze,Qk),e(ze,Dc),e(Dc,Vk),e(I,Bk),e(I,qn),u(Ho,qn,null),e(qn,Zk),e(qn,Ic),e(Ic,Yk),e(I,Xk),e(I,An),u(Wo,An,null),e(An,Kk),e(An,jc),e(jc,Jk),e(I,eP),e(I,Cn),u(Qo,Cn,null),e(Cn,tP),e(Cn,Sc),e(Sc,nP),m(t,oh,d),m(t,zn,d),e(zn,rP),e(zn,$i),e($i,oP),e(zn,sP),m(t,sh,d),m(t,vt,d),e(vt,Dn),e(Dn,Mc),u(Vo,Mc,null),e(vt,aP),e(vt,Fc),e(Fc,iP),m(t,ah,d),m(t,re,d),u(Bo,re,null),e(re,lP),e(re,Zo),e(Zo,pP),e(Zo,Lc),e(Lc,cP),e(Zo,dP),e(re,mP),e(re,wt),e(wt,fP),e(wt,Ei),e(Ei,hP),e(wt,uP),e(wt,Uc),e(Uc,gP),e(wt,_P),e(re,vP),e(re,Yo),e(Yo,wP),e(Yo,Xo),e(Xo,bP),e(Yo,kP),e(re,PP),e(re,In),u(Ko,In,null),e(In,TP),e(In,Gc),e(Gc,yP),m(t,ih,d),m(t,bt,d),e(bt,jn),e(jn,Nc),u(Jo,Nc,null),e(bt,xP),e(bt,Oc),e(Oc,$P),m(t,lh,d),m(t,j,d),u(es,j,null),e(j,EP),e(j,kt),e(kt,qP),e(kt,Rc),e(Rc,AP),e(kt,CP),e(kt,qi),e(qi,zP),e(kt,
DP),e(j,IP),e(j,Pt),e(Pt,jP),e(Pt,Ai),e(Ai,SP),e(Pt,MP),e(Pt,Hc),e(Hc,FP),e(Pt,LP),e(j,UP),e(j,ts),e(ts,GP),e(ts,ns),e(ns,NP),e(ts,OP),e(j,RP),e(j,Sn),u(rs,Sn,null),e(Sn,HP),e(Sn,Wc),e(Wc,WP),e(j,QP),e(j,De),u(os,De,null),e(De,VP),e(De,Mn),e(Mn,BP),e(Mn,Qc),e(Qc,ZP),e(Mn,YP),e(Mn,Vc),e(Vc,XP),e(De,KP),e(De,Bc),e(Bc,JP),e(j,eT),e(j,Ie),u(ss,Ie,null),e(Ie,tT),e(Ie,as),e(as,nT),e(as,Zc),e(Zc,rT),e(as,oT),e(Ie,sT),e(Ie,Yc),e(Yc,aT),e(j,iT),e(j,Fn),u(is,Fn,null),e(Fn,lT),e(Fn,Xc),e(Xc,pT),m(t,ph,d),m(t,Tt,d),e(Tt,Ln),e(Ln,Kc),u(ls,Kc,null),e(Tt,cT),e(Tt,Jc),e(Jc,dT),m(t,ch,d),m(t,F,d),u(ps,F,null),e(F,mT),e(F,ed),e(ed,fT),e(F,hT),e(F,yt),e(yt,uT),e(yt,Ci),e(Ci,gT),e(yt,_T),e(yt,td),e(td,vT),e(yt,wT),e(F,bT),e(F,L),e(L,kT),e(L,nd),e(nd,PT),e(L,TT),e(L,rd),e(rd,yT),e(L,xT),e(L,od),e(od,$T),e(L,ET),e(L,sd),e(sd,qT),e(L,AT),e(L,ad),e(ad,CT),e(L,zT),e(L,id),e(id,DT),e(L,IT),e(L,cs),e(cs,jT),e(L,ST),e(F,MT),e(F,ld),e(ld,FT),e(F,LT),u(ds,F,null),e(F,UT),e(F,Un),u(ms,Un,null),e(Un,GT),e(Un,pd),e(pd,NT),m(t,dh,d),m(t,xt,d),e(xt,Gn),e(Gn,cd),u(fs,cd,null),e(xt,OT),e(xt,dd),e(dd,RT),m(t,mh,d),m(t,oe,d),u(hs,oe,null),e(oe,HT),e(oe,us),e(us,WT),e(us,md),e(md,QT),e(us,VT),e(oe,BT),e(oe,$t),e($t,ZT),e($t,zi),e(zi,YT),e($t,XT),e($t,fd),e(fd,KT),e($t,JT),e(oe,ey),e(oe,gs),e(gs,ty),e(gs,_s),e(_s,ny),e(gs,ry),e(oe,oy),e(oe,C),u(vs,C,null),e(C,sy),e(C,hd),e(hd,ay),e(C,iy),e(C,W),e(W,ud),e(ud,gd),e(gd,ly),e(W,py),e(W,_d),e(_d,vd),e(vd,cy),e(W,dy),e(W,wd),e(wd,bd),e(bd,my),e(W,fy),e(W,kd),e(kd,Pd),e(Pd,hy),e(W,uy),e(W,Td),e(Td,yd),e(yd,gy),e(W,_y),e(W,xd),e(xd,$d),e($d,vy),e(W,wy),e(W,Ed),e(Ed,qd),e(qd,by),e(C,ky),e(C,ws),e(ws,Py),e(ws,Ad),e(Ad,Ty),e(ws,yy),e(C,xy),e(C,Cd),e(Cd,$y),e(C,Ey),u(bs,C,null),e(C,qy),e(C,zd),e(zd,Ay),e(C,Cy),e(C,Dd),e(Dd,zy),e(C,Dy),u(ks,C,null),m(t,fh,d),m(t,Et,d),e(Et,Nn),e(Nn,Id),u(Ps,Id,null),e(Et,Iy),e(Et,jd),e(jd,jy),m(t,hh,d),m(t,Q,d),u(Ts,Q,null),e(Q,Sy),e(Q,qt),e(qt,My),e(qt,Sd),e(Sd,Fy),e(qt,Ly),e(qt,Di),e(Di,Uy),e(qt,Gy),e(Q,Ny),e(Q,At),e(At,Oy),e(At,Ii),
e(Ii,Ry),e(At,Hy),e(At,Md),e(Md,Wy),e(At,Qy),e(Q,Vy),e(Q,ys),e(ys,By),e(ys,Fd),e(Fd,Zy),e(ys,Yy),e(Q,Xy),e(Q,xs),e(xs,Ky),e(xs,$s),e($s,Jy),e(xs,e2),e(Q,t2),e(Q,On),u(Es,On,null),e(On,n2),e(On,Ld),e(Ld,r2),m(t,uh,d),m(t,Ct,d),e(Ct,Rn),e(Rn,Ud),u(qs,Ud,null),e(Ct,o2),e(Ct,Gd),e(Gd,s2),m(t,gh,d),m(t,se,d),u(As,se,null),e(se,a2),e(se,Cs),e(Cs,i2),e(Cs,Nd),e(Nd,l2),e(Cs,p2),e(se,c2),e(se,zt),e(zt,d2),e(zt,ji),e(ji,m2),e(zt,f2),e(zt,Od),e(Od,h2),e(zt,u2),e(se,g2),e(se,zs),e(zs,_2),e(zs,Ds),e(Ds,v2),e(zs,w2),e(se,b2),e(se,Hn),u(Is,Hn,null),e(Hn,k2),e(Hn,Rd),e(Rd,P2),m(t,_h,d),m(t,Dt,d),e(Dt,Wn),e(Wn,Hd),u(js,Hd,null),e(Dt,T2),e(Dt,Wd),e(Wd,y2),m(t,vh,d),m(t,S,d),u(Ss,S,null),e(S,x2),e(S,Qd),e(Qd,$2),e(S,E2),e(S,It),e(It,q2),e(It,Si),e(Si,A2),e(It,C2),e(It,Vd),e(Vd,z2),e(It,D2),e(S,I2),e(S,Ms),e(Ms,j2),e(Ms,Fs),e(Fs,S2),e(Ms,M2),e(S,F2),e(S,Bd),e(Bd,L2),e(S,U2),u(Ls,S,null),e(S,G2),e(S,Qn),u(Us,Qn,null),e(Qn,N2),e(Qn,Zd),e(Zd,O2),e(S,R2),e(S,Vn),u(Gs,Vn,null),e(Vn,H2),e(Vn,Yd),e(Yd,W2),m(t,wh,d),m(t,jt,d),e(jt,Bn),e(Bn,Xd),u(Ns,Xd,null),e(jt,Q2),e(jt,Kd),e(Kd,V2),m(t,bh,d),m(t,q,d),u(Os,q,null),e(q,B2),e(q,St),e(St,Z2),e(St,Jd),e(Jd,Y2),e(St,X2),e(St,Mi),e(Mi,K2),e(St,J2),e(q,e4),e(q,Mt),e(Mt,t4),e(Mt,Fi),e(Fi,n4),e(Mt,r4),e(Mt,em),e(em,o4),e(Mt,s4),e(q,a4),e(q,Rs),e(Rs,i4),e(Rs,Hs),e(Hs,l4),e(Rs,p4),e(q,c4),e(q,Zn),u(Ws,Zn,null),e(Zn,d4),e(Zn,tm),e(tm,m4),e(q,f4),e(q,je),u(Qs,je,null),e(je,h4),e(je,nm),e(nm,u4),e(je,g4),e(je,rm),e(rm,_4),e(q,v4),e(q,Yn),u(Vs,Yn,null),e(Yn,w4),e(Yn,om),e(om,b4),e(q,k4),e(q,Xn),u(Bs,Xn,null),e(Xn,P4),e(Xn,sm),e(sm,T4),e(q,y4),e(q,Kn),u(Zs,Kn,null),e(Kn,x4),e(Kn,am),e(am,$4),m(t,kh,d),m(t,Ft,d),e(Ft,Jn),e(Jn,im),u(Ys,im,null),e(Ft,E4),e(Ft,lm),e(lm,q4),m(t,Ph,d),m(t,U,d),u(Xs,U,null),e(U,A4),e(U,pm),e(pm,C4),e(U,z4),e(U,Lt),e(Lt,D4),e(Lt,Li),e(Li,I4),e(Lt,j4),e(Lt,cm),e(cm,S4),e(Lt,M4),e(U,F4),e(U,Ks),e(Ks,L4),e(Ks,Js),e(Js,U4),e(Ks,G4),e(U,N4),e(U,dm),e(dm,O4),e(U,R4),u(ea,U,null),e(U,H4),e(U,er),u(ta,er,null),e(er,W4),e(er,mm),e(mm,Q4),m(t
,Th,d),m(t,Ut,d),e(Ut,tr),e(tr,fm),u(na,fm,null),e(Ut,V4),e(Ut,hm),e(hm,B4),m(t,yh,d),m(t,V,d),u(ra,V,null),e(V,Z4),e(V,oa),e(oa,Y4),e(oa,um),e(um,X4),e(oa,K4),e(V,J4),e(V,Ee),e(Ee,e0),e(Ee,gm),e(gm,t0),e(Ee,n0),e(Ee,_m),e(_m,r0),e(Ee,o0),e(Ee,vm),e(vm,s0),e(Ee,a0),e(V,i0),e(V,Gt),e(Gt,l0),e(Gt,Ui),e(Ui,p0),e(Gt,c0),e(Gt,wm),e(wm,d0),e(Gt,m0),e(V,f0),e(V,sa),e(sa,h0),e(sa,aa),e(aa,u0),e(sa,g0),e(V,_0),e(V,nr),u(ia,nr,null),e(nr,v0),e(nr,la),e(la,w0),e(la,Gi),e(Gi,b0),e(la,k0),m(t,xh,d),m(t,Nt,d),e(Nt,rr),e(rr,bm),u(pa,bm,null),e(Nt,P0),e(Nt,km),e(km,T0),m(t,$h,d),m(t,ae,d),u(ca,ae,null),e(ae,y0),e(ae,Ot),e(Ot,x0),e(Ot,Pm),e(Pm,$0),e(Ot,E0),e(Ot,Tm),e(Tm,q0),e(Ot,A0),e(ae,C0),e(ae,Rt),e(Rt,z0),e(Rt,Ni),e(Ni,D0),e(Rt,I0),e(Rt,ym),e(ym,j0),e(Rt,S0),e(ae,M0),e(ae,da),e(da,F0),e(da,ma),e(ma,L0),e(da,U0),e(ae,G0),e(ae,or),u(fa,or,null),e(or,N0),e(or,xm),e(xm,O0),m(t,Eh,d),m(t,Ht,d),e(Ht,sr),e(sr,$m),u(ha,$m,null),e(Ht,R0),e(Ht,Oi),e(Oi,H0),e(Oi,Em),e(Em,W0),m(t,qh,d),m(t,P,d),u(ua,P,null),e(P,Q0),e(P,qm),e(qm,V0),e(P,B0),e(P,Am),e(Am,Z0),e(P,Y0),e(P,Cm),e(Cm,X0),e(P,K0),e(P,zm),e(zm,J0),e(P,ex),e(P,_e),e(_e,tx),e(_e,Ri),e(Ri,nx),e(_e,rx),e(_e,Dm),e(Dm,ox),e(_e,sx),e(_e,Im),e(Im,ax),e(_e,ix),e(_e,jm),e(jm,lx),e(_e,px),e(P,cx),e(P,ar),u(ga,ar,null),e(ar,dx),e(ar,Sm),e(Sm,mx),e(P,fx),e(P,ve),u(_a,ve,null),e(ve,hx),e(ve,Mm),e(Mm,ux),e(ve,gx),e(ve,Fm),e(Fm,_x),e(ve,vx),u(va,ve,null),e(P,wx),e(P,ir),u(wa,ir,null),e(ir,bx),e(ir,Lm),e(Lm,kx),e(P,Px),e(P,lr),u(ba,lr,null),e(lr,Tx),e(lr,ka),e(ka,yx),e(ka,Um),e(Um,xx),e(ka,$x),e(P,Ex),e(P,pr),u(Pa,pr,null),e(pr,qx),e(pr,Ta),e(Ta,Ax),e(Ta,Gm),e(Gm,Cx),e(Ta,zx),e(P,Dx),e(P,cr),u(ya,cr,null),e(cr,Ix),e(cr,Wt),e(Wt,jx),e(Wt,Nm),e(Nm,Sx),e(Wt,Mx),e(Wt,Om),e(Om,Fx),e(Wt,Lx),e(P,Ux),e(P,dr),u(xa,dr,null),e(dr,Gx),e(dr,Rm),e(Rm,Nx),e(P,Ox),e(P,mr),u($a,mr,null),e(mr,Rx),e(mr,Ea),e(Ea,Hx),e(Ea,Hm),e(Hm,Wx),e(Ea,Qx),Ah=!0},p(t,[d]){const qa={};d&2&&(qa.$$scope={dirty:d,ctx:t}),en.$set(qa);const 
Wm={};d&2&&(Wm.$$scope={dirty:d,ctx:t}),kn.$set(Wm)},i(t){Ah||(g(E.$$.fragment,t),g(kr.$$.fragment,t),g(Pr.$$.fragment,t),g(yr.$$.fragment,t),g(xr.$$.fragment,t),g($r.$$.fragment,t),g(Er.$$.fragment,t),g(qr.$$.fragment,t),g(Dr.$$.fragment,t),g(Ir.$$.fragment,t),g(jr.$$.fragment,t),g(en.$$.fragment,t),g(Sr.$$.fragment,t),g(Mr.$$.fragment,t),g(Fr.$$.fragment,t),g(Lr.$$.fragment,t),g(Gr.$$.fragment,t),g(Nr.$$.fragment,t),g(Or.$$.fragment,t),g(Rr.$$.fragment,t),g(Hr.$$.fragment,t),g(Wr.$$.fragment,t),g(Qr.$$.fragment,t),g(Vr.$$.fragment,t),g(Br.$$.fragment,t),g(Kr.$$.fragment,t),g(eo.$$.fragment,t),g(to.$$.fragment,t),g(no.$$.fragment,t),g(oo.$$.fragment,t),g(so.$$.fragment,t),g(ao.$$.fragment,t),g(io.$$.fragment,t),g(po.$$.fragment,t),g(co.$$.fragment,t),g(mo.$$.fragment,t),g(fo.$$.fragment,t),g(uo.$$.fragment,t),g(go.$$.fragment,t),g(_o.$$.fragment,t),g(vo.$$.fragment,t),g(ko.$$.fragment,t),g(Po.$$.fragment,t),g(To.$$.fragment,t),g(kn.$$.fragment,t),g($o.$$.fragment,t),g(Eo.$$.fragment,t),g(qo.$$.fragment,t),g(Do.$$.fragment,t),g(Io.$$.fragment,t),g(jo.$$.fragment,t),g(Lo.$$.fragment,t),g(Uo.$$.fragment,t),g(Go.$$.fragment,t),g(Ro.$$.fragment,t),g(Ho.$$.fragment,t),g(Wo.$$.fragment,t),g(Qo.$$.fragment,t),g(Vo.$$.fragment,t),g(Bo.$$.fragment,t),g(Ko.$$.fragment,t),g(Jo.$$.fragment,t),g(es.$$.fragment,t),g(rs.$$.fragment,t),g(os.$$.fragment,t),g(ss.$$.fragment,t),g(is.$$.fragment,t),g(ls.$$.fragment,t),g(ps.$$.fragment,t),g(ds.$$.fragment,t),g(ms.$$.fragment,t),g(fs.$$.fragment,t),g(hs.$$.fragment,t),g(vs.$$.fragment,t),g(bs.$$.fragment,t),g(ks.$$.fragment,t),g(Ps.$$.fragment,t),g(Ts.$$.fragment,t),g(Es.$$.fragment,t),g(qs.$$.fragment,t),g(As.$$.fragment,t),g(Is.$$.fragment,t),g(js.$$.fragment,t),g(Ss.$$.fragment,t),g(Ls.$$.fragment,t),g(Us.$$.fragment,t),g(Gs.$$.fragment,t),g(Ns.$$.fragment,t),g(Os.$$.fragment,t),g(Ws.$$.fragment,t),g(Qs.$$.fragment,t),g(Vs.$$.fragment,t),g(Bs.$$.fragment,t),g(Zs.$$.fragment,t),g(Ys.$$.fragment,t),g(Xs.$$.fragment,t),g(ea.$$.fragment,t
),g(ta.$$.fragment,t),g(na.$$.fragment,t),g(ra.$$.fragment,t),g(ia.$$.fragment,t),g(pa.$$.fragment,t),g(ca.$$.fragment,t),g(fa.$$.fragment,t),g(ha.$$.fragment,t),g(ua.$$.fragment,t),g(ga.$$.fragment,t),g(_a.$$.fragment,t),g(va.$$.fragment,t),g(wa.$$.fragment,t),g(ba.$$.fragment,t),g(Pa.$$.fragment,t),g(ya.$$.fragment,t),g(xa.$$.fragment,t),g($a.$$.fragment,t),Ah=!0)},o(t){_(E.$$.fragment,t),_(kr.$$.fragment,t),_(Pr.$$.fragment,t),_(yr.$$.fragment,t),_(xr.$$.fragment,t),_($r.$$.fragment,t),_(Er.$$.fragment,t),_(qr.$$.fragment,t),_(Dr.$$.fragment,t),_(Ir.$$.fragment,t),_(jr.$$.fragment,t),_(en.$$.fragment,t),_(Sr.$$.fragment,t),_(Mr.$$.fragment,t),_(Fr.$$.fragment,t),_(Lr.$$.fragment,t),_(Gr.$$.fragment,t),_(Nr.$$.fragment,t),_(Or.$$.fragment,t),_(Rr.$$.fragment,t),_(Hr.$$.fragment,t),_(Wr.$$.fragment,t),_(Qr.$$.fragment,t),_(Vr.$$.fragment,t),_(Br.$$.fragment,t),_(Kr.$$.fragment,t),_(eo.$$.fragment,t),_(to.$$.fragment,t),_(no.$$.fragment,t),_(oo.$$.fragment,t),_(so.$$.fragment,t),_(ao.$$.fragment,t),_(io.$$.fragment,t),_(po.$$.fragment,t),_(co.$$.fragment,t),_(mo.$$.fragment,t),_(fo.$$.fragment,t),_(uo.$$.fragment,t),_(go.$$.fragment,t),_(_o.$$.fragment,t),_(vo.$$.fragment,t),_(ko.$$.fragment,t),_(Po.$$.fragment,t),_(To.$$.fragment,t),_(kn.$$.fragment,t),_($o.$$.fragment,t),_(Eo.$$.fragment,t),_(qo.$$.fragment,t),_(Do.$$.fragment,t),_(Io.$$.fragment,t),_(jo.$$.fragment,t),_(Lo.$$.fragment,t),_(Uo.$$.fragment,t),_(Go.$$.fragment,t),_(Ro.$$.fragment,t),_(Ho.$$.fragment,t),_(Wo.$$.fragment,t),_(Qo.$$.fragment,t),_(Vo.$$.fragment,t),_(Bo.$$.fragment,t),_(Ko.$$.fragment,t),_(Jo.$$.fragment,t),_(es.$$.fragment,t),_(rs.$$.fragment,t),_(os.$$.fragment,t),_(ss.$$.fragment,t),_(is.$$.fragment,t),_(ls.$$.fragment,t),_(ps.$$.fragment,t),_(ds.$$.fragment,t),_(ms.$$.fragment,t),_(fs.$$.fragment,t),_(hs.$$.fragment,t),_(vs.$$.fragment,t),_(bs.$$.fragment,t),_(ks.$$.fragment,t),_(Ps.$$.fragment,t),_(Ts.$$.fragment,t),_(Es.$$.fragment,t),_(qs.$$.fragment,t),_(As.$$.fragment,t),_(Is.$
$.fragment,t),_(js.$$.fragment,t),_(Ss.$$.fragment,t),_(Ls.$$.fragment,t),_(Us.$$.fragment,t),_(Gs.$$.fragment,t),_(Ns.$$.fragment,t),_(Os.$$.fragment,t),_(Ws.$$.fragment,t),_(Qs.$$.fragment,t),_(Vs.$$.fragment,t),_(Bs.$$.fragment,t),_(Zs.$$.fragment,t),_(Ys.$$.fragment,t),_(Xs.$$.fragment,t),_(ea.$$.fragment,t),_(ta.$$.fragment,t),_(na.$$.fragment,t),_(ra.$$.fragment,t),_(ia.$$.fragment,t),_(pa.$$.fragment,t),_(ca.$$.fragment,t),_(fa.$$.fragment,t),_(ha.$$.fragment,t),_(ua.$$.fragment,t),_(ga.$$.fragment,t),_(_a.$$.fragment,t),_(va.$$.fragment,t),_(wa.$$.fragment,t),_(ba.$$.fragment,t),_(Pa.$$.fragment,t),_(ya.$$.fragment,t),_(xa.$$.fragment,t),_($a.$$.fragment,t),Ah=!1},d(t){n(x),t&&n(ie),t&&n($),v(E),t&&n(Vm),t&&n(Qt),t&&n(Bm),t&&n(za),t&&n(Zm),t&&n(Vt),t&&n(Ym),t&&n(Ve),v(kr),t&&n(Xm),t&&n(Zt),t&&n(Km),t&&n(Xa),t&&n(Jm),v(Pr,t),t&&n(ef),t&&n(Yt),t&&n(tf),v(yr,t),t&&n(nf),t&&n(Xt),t&&n(rf),v(xr,t),t&&n(of),t&&n(Kt),t&&n(sf),v($r,t),t&&n(af),t&&n(Ka),t&&n(lf),v(Er,t),t&&n(pf),t&&n(R),v(qr),v(Dr),t&&n(cf),t&&n(Ze),v(Ir),t&&n(df),t&&n(qe),t&&n(mf),v(jr,t),t&&n(ff),v(en,t),t&&n(hf),v(Sr,t),t&&n(uf),v(Mr,t),t&&n(gf),t&&n(ni),t&&n(_f),v(Fr,t),t&&n(vf),t&&n(tn),t&&n(wf),v(Lr,t),t&&n(bf),t&&n(ri),t&&n(kf),t&&n(oi),t&&n(Pf),t&&n(le),t&&n(Tf),t&&n(Xe),v(Gr),t&&n(yf),t&&n(ye),t&&n(xf),t&&n(Ae),t&&n($f),v(Nr,t),t&&n(Ef),t&&n(si),t&&n(qf),v(Or,t),t&&n(Af),t&&n(ai),t&&n(Cf),t&&n(rn),t&&n(zf),t&&n(Ke),v(Rr),t&&n(Df),t&&n(ii),t&&n(If),t&&n(sn),t&&n(jf),t&&n(li),t&&n(Sf),t&&n(pi),t&&n(Mf),v(Hr,t),t&&n(Ff),t&&n(ci),t&&n(Lf),t&&n(Je),v(Wr),t&&n(Uf),t&&n(di),t&&n(Gf),t&&n(et),v(Qr),t&&n(Nf),t&&n(tt),v(Vr),t&&n(Of),t&&n(J),v(Br),v(Kr),t&&n(Rf),t&&n(rt),v(eo),t&&n(Hf),t&&n(he),v(to),v(no),t&&n(Wf),t&&n(ot),v(oo),t&&n(Qf),t&&n(D),v(so),v(ao),v(io),v(po),v(co),v(mo),t&&n(Vf),t&&n(M),v(fo),v(uo),v(go),t&&n(Bf),t&&n(at),v(_o),t&&n(Zf),t&&n(ee),v(vo),v(ko),t&&n(Yf),t&&n(lt),v(Po),t&&n(Xf),t&&n(H),v(To),v(kn),v($o),t&&n(Kf),t&&n(dt),v(Eo),t&&n(Jf),t&&n(te),v(qo),v(Do),t&&n(eh),t&&n(ft),v(Io
),t&&n(th),t&&n(ne),v(jo),v(Lo),t&&n(nh),t&&n(ut),v(Uo),t&&n(rh),t&&n(I),v(Go),v(Ro),v(Ho),v(Wo),v(Qo),t&&n(oh),t&&n(zn),t&&n(sh),t&&n(vt),v(Vo),t&&n(ah),t&&n(re),v(Bo),v(Ko),t&&n(ih),t&&n(bt),v(Jo),t&&n(lh),t&&n(j),v(es),v(rs),v(os),v(ss),v(is),t&&n(ph),t&&n(Tt),v(ls),t&&n(ch),t&&n(F),v(ps),v(ds),v(ms),t&&n(dh),t&&n(xt),v(fs),t&&n(mh),t&&n(oe),v(hs),v(vs),v(bs),v(ks),t&&n(fh),t&&n(Et),v(Ps),t&&n(hh),t&&n(Q),v(Ts),v(Es),t&&n(uh),t&&n(Ct),v(qs),t&&n(gh),t&&n(se),v(As),v(Is),t&&n(_h),t&&n(Dt),v(js),t&&n(vh),t&&n(S),v(Ss),v(Ls),v(Us),v(Gs),t&&n(wh),t&&n(jt),v(Ns),t&&n(bh),t&&n(q),v(Os),v(Ws),v(Qs),v(Vs),v(Bs),v(Zs),t&&n(kh),t&&n(Ft),v(Ys),t&&n(Ph),t&&n(U),v(Xs),v(ea),v(ta),t&&n(Th),t&&n(Ut),v(na),t&&n(yh),t&&n(V),v(ra),v(ia),t&&n(xh),t&&n(Nt),v(pa),t&&n($h),t&&n(ae),v(ca),v(fa),t&&n(Eh),t&&n(Ht),v(ha),t&&n(qh),t&&n(P),v(ua),v(ga),v(_a),v(va),v(wa),v(ba),v(Pa),v(ya),v(xa),v($a)}}}const AA={local:"pipelines",sections:[{local:"transformers.pipeline",title:"The pipeline abstraction"},{local:"pipeline-batching",title:"Pipeline batching"},{local:"pipeline-chunk-batching",title:"Pipeline chunk batching"},{local:"pipeline-custom-code",title:"Pipeline custom code"},{local:"implementing-a-pipeline",title:"Implementing a 
pipeline"},{local:"the-task-specific-pipelines",sections:[{local:"transformers.AudioClassificationPipeline",title:"AudioClassificationPipeline"},{local:"transformers.AutomaticSpeechRecognitionPipeline",title:"AutomaticSpeechRecognitionPipeline"},{local:"transformers.Conversation",title:"ConversationalPipeline"},{local:"transformers.FeatureExtractionPipeline",title:"FeatureExtractionPipeline"},{local:"transformers.FillMaskPipeline",title:"FillMaskPipeline"},{local:"transformers.ImageClassificationPipeline",title:"ImageClassificationPipeline"},{local:"transformers.ImageSegmentationPipeline",title:"ImageSegmentationPipeline"},{local:"transformers.TokenClassificationPipeline",title:"NerPipeline"},{local:"transformers.ObjectDetectionPipeline",title:"ObjectDetectionPipeline"},{local:"transformers.QuestionAnsweringPipeline",title:"QuestionAnsweringPipeline"},{local:"transformers.SummarizationPipeline",title:"SummarizationPipeline"},{local:"transformers.TableQuestionAnsweringPipeline",title:"TableQuestionAnsweringPipeline"},{local:"transformers.TextClassificationPipeline",title:"TextClassificationPipeline"},{local:"transformers.TextGenerationPipeline",title:"TextGenerationPipeline"},{local:"transformers.Text2TextGenerationPipeline",title:"Text2TextGenerationPipeline"},{local:"transformers.TokenClassificationPipeline",title:"TokenClassificationPipeline"},{local:"transformers.TranslationPipeline",title:"TranslationPipeline"},{local:"transformers.ZeroShotClassificationPipeline",title:"ZeroShotClassificationPipeline"},{local:"transformers.ZeroShotImageClassificationPipeline",title:"ZeroShotImageClassificationPipeline"}],title:"The task specific pipelines"},{local:"transformers.Pipeline",title:"Parent class: `Pipeline`"}],title:"Pipelines"};function CA(vr,x,ie){let{fw:$}=x;return vr.$$set=A=>{"fw"in A&&ie(0,$=A.fw)},[$]}class FA extends PA{constructor(x){super();TA(this,x,CA,qA,yA,{fw:0})}}export{FA as default,AA as metadata};
407
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/tasks/audio_classification.mdx-cab2e6ac.js
import{S as Es,i as xs,s as As,e as r,k as f,w as v,t as l,M as Ts,c as o,d as a,m as h,a as i,x as w,h as n,b as m,F as t,g as p,y as j,q as k,o as y,B as E}from"../../chunks/vendor-4833417e.js";import{T as Wt}from"../../chunks/Tip-fffd6df1.js";import{Y as Ps}from"../../chunks/Youtube-27813aed.js";import{I as Ia}from"../../chunks/IconCopyLink-4b81c553.js";import{C as M}from"../../chunks/CodeBlock-6a3d1b46.js";import"../../chunks/CopyButton-dacfbfaf.js";function qs(F){let c,b,u,_,$;return{c(){c=r("p"),b=l("See the audio classification "),u=r("a"),_=l("task page"),$=l(" for more information about its associated models, datasets, and metrics."),this.h()},l(d){c=o(d,"P",{});var g=i(c);b=n(g,"See the audio classification "),u=o(g,"A",{href:!0,rel:!0});var x=i(u);_=n(x,"task page"),x.forEach(a),$=n(g," for more information about its associated models, datasets, and metrics."),g.forEach(a),this.h()},h(){m(u,"href","https://huggingface.co/tasks/audio-classification"),m(u,"rel","nofollow")},m(d,g){p(d,c,g),t(c,b),t(c,u),t(u,_),t(c,$)},d(d){d&&a(c)}}}function Cs(F){let c,b,u,_,$,d,g,x;return{c(){c=r("p"),b=l("If you aren\u2019t familiar with fine-tuning a model with the "),u=r("a"),_=l("Trainer"),$=l(", take a look at the basic tutorial "),d=r("a"),g=l("here"),x=l("!"),this.h()},l(I){c=o(I,"P",{});var A=i(c);b=n(A,"If you aren\u2019t familiar with fine-tuning a model with the "),u=o(A,"A",{href:!0});var q=i(u);_=n(q,"Trainer"),q.forEach(a),$=n(A,", take a look at the basic tutorial "),d=o(A,"A",{href:!0});var J=i(d);g=n(J,"here"),J.forEach(a),x=n(A,"!"),A.forEach(a),this.h()},h(){m(u,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(d,"href","training#finetune-with-trainer")},m(I,A){p(I,c,A),t(c,b),t(c,u),t(u,_),t(c,$),t(c,d),t(d,g),t(c,x)},d(I){I&&a(c)}}}function Ss(F){let c,b,u,_,$;return{c(){c=r("p"),b=l("For a more in-depth example of how to fine-tune a model for audio classification, take a look at the corresponding 
"),u=r("a"),_=l("PyTorch notebook"),$=l("."),this.h()},l(d){c=o(d,"P",{});var g=i(c);b=n(g,"For a more in-depth example of how to fine-tune a model for audio classification, take a look at the corresponding "),u=o(g,"A",{href:!0,rel:!0});var x=i(u);_=n(x,"PyTorch notebook"),x.forEach(a),$=n(g,"."),g.forEach(a),this.h()},h(){m(u,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/audio_classification.ipynb"),m(u,"rel","nofollow")},m(d,g){p(d,c,g),t(c,b),t(c,u),t(u,_),t(c,$)},d(d){d&&a(c)}}}function Ls(F){let c,b,u,_,$,d,g,x,I,A,q,J,be,Oa,Xe,C,Na,Q,Ba,Ma,X,Ra,Ua,Ze,R,ea,O,U,De,Z,Wa,Fe,Va,aa,$e,Ya,ta,ee,sa,ve,za,la,ae,na,T,Ha,Ie,Ka,Ga,Oe,Ja,Qa,Ne,Xa,Za,ra,te,oa,we,et,ia,se,pa,S,at,Be,tt,st,Me,lt,nt,ca,N,W,Re,le,rt,Ue,ot,fa,je,it,ha,ne,ua,ke,pt,ma,L,re,ct,We,ft,ht,ut,oe,mt,ye,dt,_t,gt,Ve,bt,da,ie,_a,P,$t,pe,Ye,vt,wt,ze,jt,kt,He,yt,Et,ga,ce,ba,B,V,Ke,fe,xt,Ge,At,$a,Y,Tt,Ee,Pt,qt,va,he,wa,z,ja,xe,Ct,ka,D,ue,St,Ae,Lt,Dt,Ft,me,It,Te,Ot,Nt,Bt,de,Mt,Pe,Rt,Ut,ya,_e,Ea,H,xa;return d=new Ia({}),q=new Ps({props:{id:"KWwzcmG98Ds"}}),R=new Wt({props:{$$slots:{default:[qs]},$$scope:{ctx:F}}}),Z=new Ia({}),ee=new M({props:{code:`from datasets import load_dataset ks = load_dataset("superb", "ks")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>ks = load_dataset(<span class="hljs-string">&quot;superb&quot;</span>, <span class="hljs-string">&quot;ks&quot;</span>)`}}),ae=new M({props:{code:'ks["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>ks[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([ <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , ..., -<span 
class="hljs-number">0.00592041</span>, -<span class="hljs-number">0.00405884</span>, -<span class="hljs-number">0.00253296</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/05734a36d88019a09725c20cc024e1c4e7982e37d7d55c0c1ca1742ea1cdd47f/_background_noise_/doing_the_dishes.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">16000</span>}, <span class="hljs-string">&#x27;file&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/05734a36d88019a09725c20cc024e1c4e7982e37d7d55c0c1ca1742ea1cdd47f/_background_noise_/doing_the_dishes.wav&#x27;</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">10</span>}`}}),te=new M({props:{code:`labels = ks["train"].features["label"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = str(i) id2label[str(i)] = label`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>labels = ks[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">&quot;label&quot;</span>].names <span class="hljs-meta">&gt;&gt;&gt; </span>label2id, id2label = <span class="hljs-built_in">dict</span>(), <span class="hljs-built_in">dict</span>() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(labels): <span class="hljs-meta">... </span> label2id[label] = <span class="hljs-built_in">str</span>(i) <span class="hljs-meta">... 
</span> id2label[<span class="hljs-built_in">str</span>(i)] = label`}}),se=new M({props:{code:"id2label[str(10)]",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>id2label[<span class="hljs-built_in">str</span>(<span class="hljs-number">10</span>)] <span class="hljs-string">&#x27;_silence_&#x27;</span>`}}),le=new Ia({}),ne=new M({props:{code:`from transformers import AutoFeatureExtractor feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>)`}}),ie=new M({props:{code:`def preprocess_function(examples): audio_arrays = [x["array"] for x in examples["audio"]] inputs = feature_extractor( audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=16000, truncation=True ) return inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> audio_arrays = [x[<span class="hljs-string">&quot;array&quot;</span>] <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;audio&quot;</span>]] <span class="hljs-meta">... </span> inputs = feature_extractor( <span class="hljs-meta">... </span> audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=<span class="hljs-number">16000</span>, truncation=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> inputs`}}),ce=new M({props:{code:'encoded_ks = ks.map(preprocess_function, remove_columns=["audio", "file"], batched=True)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>encoded_ks = ks.<span class="hljs-built_in">map</span>(preprocess_function, remove_columns=[<span class="hljs-string">&quot;audio&quot;</span>, <span class="hljs-string">&quot;file&quot;</span>], batched=<span class="hljs-literal">True</span>)'}}),fe=new Ia({}),he=new M({props:{code:`from transformers import AutoModelForAudioClassification, TrainingArguments, Trainer num_labels = len(id2label) model = AutoModelForAudioClassification.from_pretrained( "facebook/wav2vec2-base", num_labels=num_labels, label2id=label2id, id2label=id2label )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForAudioClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>num_labels = <span class="hljs-built_in">len</span>(id2label) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>, num_labels=num_labels, label2id=label2id, id2label=id2label <span class="hljs-meta">... </span>)`}}),z=new Wt({props:{$$slots:{default:[Cs]},$$scope:{ctx:F}}}),_e=new M({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", save_strategy="epoch", learning_rate=3e-5, num_train_epochs=5, ) trainer = Trainer( model=model, args=training_args, train_dataset=encoded_ks["train"], eval_dataset=encoded_ks["validation"], tokenizer=feature_extractor, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... 
</span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> save_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">3e-5</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">5</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=encoded_ks[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=encoded_ks[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> tokenizer=feature_extractor, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),H=new Wt({props:{$$slots:{default:[Ss]},$$scope:{ctx:F}}}),{c(){c=r("meta"),b=f(),u=r("h1"),_=r("a"),$=r("span"),v(d.$$.fragment),g=f(),x=r("span"),I=l("Audio classification"),A=f(),v(q.$$.fragment),J=f(),be=r("p"),Oa=l("Audio classification assigns a label or class to audio data. It is similar to text classification, except an audio input is continuous and must be discretized, whereas text can be split into tokens. 
Some practical applications of audio classification include identifying intent, speakers, and even animal species by their sounds."),Xe=f(),C=r("p"),Na=l("This guide will show you how to fine-tune "),Q=r("a"),Ba=l("Wav2Vec2"),Ma=l(" on the Keyword Spotting subset of the "),X=r("a"),Ra=l("SUPERB"),Ua=l(" benchmark to classify utterances."),Ze=f(),v(R.$$.fragment),ea=f(),O=r("h2"),U=r("a"),De=r("span"),v(Z.$$.fragment),Wa=f(),Fe=r("span"),Va=l("Load SUPERB dataset"),aa=f(),$e=r("p"),Ya=l("Load the SUPERB dataset from the \u{1F917} Datasets library:"),ta=f(),v(ee.$$.fragment),sa=f(),ve=r("p"),za=l("Then take a look at an example:"),la=f(),v(ae.$$.fragment),na=f(),T=r("p"),Ha=l("The "),Ie=r("code"),Ka=l("audio"),Ga=l(" column contains a 1-dimensional "),Oe=r("code"),Ja=l("array"),Qa=l(" of the speech signal that must be called to load and resample the audio file. The "),Ne=r("code"),Xa=l("label"),Za=l(" column is an integer that represents the utterance class. Create a dictionary that maps a label name to an integer and vice versa. 
The mapping will help the model recover the label name from the label number:"),ra=f(),v(te.$$.fragment),oa=f(),we=r("p"),et=l("Now you can convert the label number to a label name for more information:"),ia=f(),v(se.$$.fragment),pa=f(),S=r("p"),at=l("Each keyword - or label - corresponds to a number; "),Be=r("code"),tt=l("10"),st=l(" indicates "),Me=r("code"),lt=l("silence"),nt=l(" in the example above."),ca=f(),N=r("h2"),W=r("a"),Re=r("span"),v(le.$$.fragment),rt=f(),Ue=r("span"),ot=l("Preprocess"),fa=f(),je=r("p"),it=l("Load the Wav2Vec2 feature extractor to process the audio signal:"),ha=f(),v(ne.$$.fragment),ua=f(),ke=r("p"),pt=l("The preprocessing function needs to:"),ma=f(),L=r("ol"),re=r("li"),ct=l("Call the "),We=r("code"),ft=l("audio"),ht=l(" column to load and if necessary resample the audio file."),ut=f(),oe=r("li"),mt=l("Check the sampling rate of the audio file matches the sampling rate of the audio data a model was pretrained with. You can find this information on the Wav2Vec2 "),ye=r("a"),dt=l("model card"),_t=l("."),gt=f(),Ve=r("li"),bt=l("Set a maximum input length so longer inputs are batched without being truncated."),da=f(),v(ie.$$.fragment),_a=f(),P=r("p"),$t=l("Use \u{1F917} Datasets "),pe=r("a"),Ye=r("code"),vt=l("map"),wt=l(" function to apply the preprocessing function over the entire dataset. You can speed up the "),ze=r("code"),jt=l("map"),kt=l(" function by setting "),He=r("code"),yt=l("batched=True"),Et=l(" to process multiple elements of the dataset at once. Remove the columns you don\u2019t need:"),ga=f(),v(ce.$$.fragment),ba=f(),B=r("h2"),V=r("a"),Ke=r("span"),v(fe.$$.fragment),xt=f(),Ge=r("span"),At=l("Fine-tune with Trainer"),$a=f(),Y=r("p"),Tt=l("Load Wav2Vec2 with "),Ee=r("a"),Pt=l("AutoModelForAudioClassification"),qt=l(". 
Specify the number of labels, and pass the model the mapping between label number and label class:"),va=f(),v(he.$$.fragment),wa=f(),v(z.$$.fragment),ja=f(),xe=r("p"),Ct=l("At this point, only three steps remain:"),ka=f(),D=r("ol"),ue=r("li"),St=l("Define your training hyperparameters in "),Ae=r("a"),Lt=l("TrainingArguments"),Dt=l("."),Ft=f(),me=r("li"),It=l("Pass the training arguments to "),Te=r("a"),Ot=l("Trainer"),Nt=l(" along with the model, datasets, and feature extractor."),Bt=f(),de=r("li"),Mt=l("Call "),Pe=r("a"),Rt=l("train()"),Ut=l(" to fine-tune your model."),ya=f(),v(_e.$$.fragment),Ea=f(),v(H.$$.fragment),this.h()},l(e){const s=Ts('[data-svelte="svelte-1phssyn"]',document.head);c=o(s,"META",{name:!0,content:!0}),s.forEach(a),b=h(e),u=o(e,"H1",{class:!0});var ge=i(u);_=o(ge,"A",{id:!0,class:!0,href:!0});var Je=i(_);$=o(Je,"SPAN",{});var Qe=i($);w(d.$$.fragment,Qe),Qe.forEach(a),Je.forEach(a),g=h(ge),x=o(ge,"SPAN",{});var Vt=i(x);I=n(Vt,"Audio classification"),Vt.forEach(a),ge.forEach(a),A=h(e),w(q.$$.fragment,e),J=h(e),be=o(e,"P",{});var Yt=i(be);Oa=n(Yt,"Audio classification assigns a label or class to audio data. It is similar to text classification, except an audio input is continuous and must be discretized, whereas text can be split into tokens. 
Some practical applications of audio classification include identifying intent, speakers, and even animal species by their sounds."),Yt.forEach(a),Xe=h(e),C=o(e,"P",{});var qe=i(C);Na=n(qe,"This guide will show you how to fine-tune "),Q=o(qe,"A",{href:!0,rel:!0});var zt=i(Q);Ba=n(zt,"Wav2Vec2"),zt.forEach(a),Ma=n(qe," on the Keyword Spotting subset of the "),X=o(qe,"A",{href:!0,rel:!0});var Ht=i(X);Ra=n(Ht,"SUPERB"),Ht.forEach(a),Ua=n(qe," benchmark to classify utterances."),qe.forEach(a),Ze=h(e),w(R.$$.fragment,e),ea=h(e),O=o(e,"H2",{class:!0});var Aa=i(O);U=o(Aa,"A",{id:!0,class:!0,href:!0});var Kt=i(U);De=o(Kt,"SPAN",{});var Gt=i(De);w(Z.$$.fragment,Gt),Gt.forEach(a),Kt.forEach(a),Wa=h(Aa),Fe=o(Aa,"SPAN",{});var Jt=i(Fe);Va=n(Jt,"Load SUPERB dataset"),Jt.forEach(a),Aa.forEach(a),aa=h(e),$e=o(e,"P",{});var Qt=i($e);Ya=n(Qt,"Load the SUPERB dataset from the \u{1F917} Datasets library:"),Qt.forEach(a),ta=h(e),w(ee.$$.fragment,e),sa=h(e),ve=o(e,"P",{});var Xt=i(ve);za=n(Xt,"Then take a look at an example:"),Xt.forEach(a),la=h(e),w(ae.$$.fragment,e),na=h(e),T=o(e,"P",{});var K=i(T);Ha=n(K,"The "),Ie=o(K,"CODE",{});var Zt=i(Ie);Ka=n(Zt,"audio"),Zt.forEach(a),Ga=n(K," column contains a 1-dimensional "),Oe=o(K,"CODE",{});var es=i(Oe);Ja=n(es,"array"),es.forEach(a),Qa=n(K," of the speech signal that must be called to load and resample the audio file. The "),Ne=o(K,"CODE",{});var as=i(Ne);Xa=n(as,"label"),as.forEach(a),Za=n(K," column is an integer that represents the utterance class. Create a dictionary that maps a label name to an integer and vice versa. 
The mapping will help the model recover the label name from the label number:"),K.forEach(a),ra=h(e),w(te.$$.fragment,e),oa=h(e),we=o(e,"P",{});var ts=i(we);et=n(ts,"Now you can convert the label number to a label name for more information:"),ts.forEach(a),ia=h(e),w(se.$$.fragment,e),pa=h(e),S=o(e,"P",{});var Ce=i(S);at=n(Ce,"Each keyword - or label - corresponds to a number; "),Be=o(Ce,"CODE",{});var ss=i(Be);tt=n(ss,"10"),ss.forEach(a),st=n(Ce," indicates "),Me=o(Ce,"CODE",{});var ls=i(Me);lt=n(ls,"silence"),ls.forEach(a),nt=n(Ce," in the example above."),Ce.forEach(a),ca=h(e),N=o(e,"H2",{class:!0});var Ta=i(N);W=o(Ta,"A",{id:!0,class:!0,href:!0});var ns=i(W);Re=o(ns,"SPAN",{});var rs=i(Re);w(le.$$.fragment,rs),rs.forEach(a),ns.forEach(a),rt=h(Ta),Ue=o(Ta,"SPAN",{});var os=i(Ue);ot=n(os,"Preprocess"),os.forEach(a),Ta.forEach(a),fa=h(e),je=o(e,"P",{});var is=i(je);it=n(is,"Load the Wav2Vec2 feature extractor to process the audio signal:"),is.forEach(a),ha=h(e),w(ne.$$.fragment,e),ua=h(e),ke=o(e,"P",{});var ps=i(ke);pt=n(ps,"The preprocessing function needs to:"),ps.forEach(a),ma=h(e),L=o(e,"OL",{});var Se=i(L);re=o(Se,"LI",{});var Pa=i(re);ct=n(Pa,"Call the "),We=o(Pa,"CODE",{});var cs=i(We);ft=n(cs,"audio"),cs.forEach(a),ht=n(Pa," column to load and if necessary resample the audio file."),Pa.forEach(a),ut=h(Se),oe=o(Se,"LI",{});var qa=i(oe);mt=n(qa,"Check the sampling rate of the audio file matches the sampling rate of the audio data a model was pretrained with. 
You can find this information on the Wav2Vec2 "),ye=o(qa,"A",{href:!0});var fs=i(ye);dt=n(fs,"model card"),fs.forEach(a),_t=n(qa,"."),qa.forEach(a),gt=h(Se),Ve=o(Se,"LI",{});var hs=i(Ve);bt=n(hs,"Set a maximum input length so longer inputs are batched without being truncated."),hs.forEach(a),Se.forEach(a),da=h(e),w(ie.$$.fragment,e),_a=h(e),P=o(e,"P",{});var G=i(P);$t=n(G,"Use \u{1F917} Datasets "),pe=o(G,"A",{href:!0,rel:!0});var us=i(pe);Ye=o(us,"CODE",{});var ms=i(Ye);vt=n(ms,"map"),ms.forEach(a),us.forEach(a),wt=n(G," function to apply the preprocessing function over the entire dataset. You can speed up the "),ze=o(G,"CODE",{});var ds=i(ze);jt=n(ds,"map"),ds.forEach(a),kt=n(G," function by setting "),He=o(G,"CODE",{});var _s=i(He);yt=n(_s,"batched=True"),_s.forEach(a),Et=n(G," to process multiple elements of the dataset at once. Remove the columns you don\u2019t need:"),G.forEach(a),ga=h(e),w(ce.$$.fragment,e),ba=h(e),B=o(e,"H2",{class:!0});var Ca=i(B);V=o(Ca,"A",{id:!0,class:!0,href:!0});var gs=i(V);Ke=o(gs,"SPAN",{});var bs=i(Ke);w(fe.$$.fragment,bs),bs.forEach(a),gs.forEach(a),xt=h(Ca),Ge=o(Ca,"SPAN",{});var $s=i(Ge);At=n($s,"Fine-tune with Trainer"),$s.forEach(a),Ca.forEach(a),$a=h(e),Y=o(e,"P",{});var Sa=i(Y);Tt=n(Sa,"Load Wav2Vec2 with "),Ee=o(Sa,"A",{href:!0});var vs=i(Ee);Pt=n(vs,"AutoModelForAudioClassification"),vs.forEach(a),qt=n(Sa,". 
Specify the number of labels, and pass the model the mapping between label number and label class:"),Sa.forEach(a),va=h(e),w(he.$$.fragment,e),wa=h(e),w(z.$$.fragment,e),ja=h(e),xe=o(e,"P",{});var ws=i(xe);Ct=n(ws,"At this point, only three steps remain:"),ws.forEach(a),ka=h(e),D=o(e,"OL",{});var Le=i(D);ue=o(Le,"LI",{});var La=i(ue);St=n(La,"Define your training hyperparameters in "),Ae=o(La,"A",{href:!0});var js=i(Ae);Lt=n(js,"TrainingArguments"),js.forEach(a),Dt=n(La,"."),La.forEach(a),Ft=h(Le),me=o(Le,"LI",{});var Da=i(me);It=n(Da,"Pass the training arguments to "),Te=o(Da,"A",{href:!0});var ks=i(Te);Ot=n(ks,"Trainer"),ks.forEach(a),Nt=n(Da," along with the model, datasets, and feature extractor."),Da.forEach(a),Bt=h(Le),de=o(Le,"LI",{});var Fa=i(de);Mt=n(Fa,"Call "),Pe=o(Fa,"A",{href:!0});var ys=i(Pe);Rt=n(ys,"train()"),ys.forEach(a),Ut=n(Fa," to fine-tune your model."),Fa.forEach(a),Le.forEach(a),ya=h(e),w(_e.$$.fragment,e),Ea=h(e),w(H.$$.fragment,e),this.h()},h(){m(c,"name","hf:doc:metadata"),m(c,"content",JSON.stringify(Ds)),m(_,"id","audio-classification"),m(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(_,"href","#audio-classification"),m(u,"class","relative group"),m(Q,"href","https://huggingface.co/facebook/wav2vec2-base"),m(Q,"rel","nofollow"),m(X,"href","https://huggingface.co/datasets/superb"),m(X,"rel","nofollow"),m(U,"id","load-superb-dataset"),m(U,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(U,"href","#load-superb-dataset"),m(O,"class","relative group"),m(W,"id","preprocess"),m(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),m(W,"href","#preprocess"),m(N,"class","relative group"),m(ye,"href","(https://huggingface.co/facebook/wav2vec2-base)"),m(pe,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map"),m(pe,"rel","nofollow"),m(V,"id","finetune-with-trainer"),m(V,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(V,"href","#finetune-with-trainer"),m(B,"class","relative group"),m(Ee,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForAudioClassification"),m(Ae,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),m(Te,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(Pe,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train")},m(e,s){t(document.head,c),p(e,b,s),p(e,u,s),t(u,_),t(_,$),j(d,$,null),t(u,g),t(u,x),t(x,I),p(e,A,s),j(q,e,s),p(e,J,s),p(e,be,s),t(be,Oa),p(e,Xe,s),p(e,C,s),t(C,Na),t(C,Q),t(Q,Ba),t(C,Ma),t(C,X),t(X,Ra),t(C,Ua),p(e,Ze,s),j(R,e,s),p(e,ea,s),p(e,O,s),t(O,U),t(U,De),j(Z,De,null),t(O,Wa),t(O,Fe),t(Fe,Va),p(e,aa,s),p(e,$e,s),t($e,Ya),p(e,ta,s),j(ee,e,s),p(e,sa,s),p(e,ve,s),t(ve,za),p(e,la,s),j(ae,e,s),p(e,na,s),p(e,T,s),t(T,Ha),t(T,Ie),t(Ie,Ka),t(T,Ga),t(T,Oe),t(Oe,Ja),t(T,Qa),t(T,Ne),t(Ne,Xa),t(T,Za),p(e,ra,s),j(te,e,s),p(e,oa,s),p(e,we,s),t(we,et),p(e,ia,s),j(se,e,s),p(e,pa,s),p(e,S,s),t(S,at),t(S,Be),t(Be,tt),t(S,st),t(S,Me),t(Me,lt),t(S,nt),p(e,ca,s),p(e,N,s),t(N,W),t(W,Re),j(le,Re,null),t(N,rt),t(N,Ue),t(Ue,ot),p(e,fa,s),p(e,je,s),t(je,it),p(e,ha,s),j(ne,e,s),p(e,ua,s),p(e,ke,s),t(ke,pt),p(e,ma,s),p(e,L,s),t(L,re),t(re,ct),t(re,We),t(We,ft),t(re,ht),t(L,ut),t(L,oe),t(oe,mt),t(oe,ye),t(ye,dt),t(oe,_t),t(L,gt),t(L,Ve),t(Ve,bt),p(e,da,s),j(ie,e,s),p(e,_a,s),p(e,P,s),t(P,$t),t(P,pe),t(pe,Ye),t(Ye,vt),t(P,wt),t(P,ze),t(ze,jt),t(P,kt),t(P,He),t(He,y
t),t(P,Et),p(e,ga,s),j(ce,e,s),p(e,ba,s),p(e,B,s),t(B,V),t(V,Ke),j(fe,Ke,null),t(B,xt),t(B,Ge),t(Ge,At),p(e,$a,s),p(e,Y,s),t(Y,Tt),t(Y,Ee),t(Ee,Pt),t(Y,qt),p(e,va,s),j(he,e,s),p(e,wa,s),j(z,e,s),p(e,ja,s),p(e,xe,s),t(xe,Ct),p(e,ka,s),p(e,D,s),t(D,ue),t(ue,St),t(ue,Ae),t(Ae,Lt),t(ue,Dt),t(D,Ft),t(D,me),t(me,It),t(me,Te),t(Te,Ot),t(me,Nt),t(D,Bt),t(D,de),t(de,Mt),t(de,Pe),t(Pe,Rt),t(de,Ut),p(e,ya,s),j(_e,e,s),p(e,Ea,s),j(H,e,s),xa=!0},p(e,[s]){const ge={};s&2&&(ge.$$scope={dirty:s,ctx:e}),R.$set(ge);const Je={};s&2&&(Je.$$scope={dirty:s,ctx:e}),z.$set(Je);const Qe={};s&2&&(Qe.$$scope={dirty:s,ctx:e}),H.$set(Qe)},i(e){xa||(k(d.$$.fragment,e),k(q.$$.fragment,e),k(R.$$.fragment,e),k(Z.$$.fragment,e),k(ee.$$.fragment,e),k(ae.$$.fragment,e),k(te.$$.fragment,e),k(se.$$.fragment,e),k(le.$$.fragment,e),k(ne.$$.fragment,e),k(ie.$$.fragment,e),k(ce.$$.fragment,e),k(fe.$$.fragment,e),k(he.$$.fragment,e),k(z.$$.fragment,e),k(_e.$$.fragment,e),k(H.$$.fragment,e),xa=!0)},o(e){y(d.$$.fragment,e),y(q.$$.fragment,e),y(R.$$.fragment,e),y(Z.$$.fragment,e),y(ee.$$.fragment,e),y(ae.$$.fragment,e),y(te.$$.fragment,e),y(se.$$.fragment,e),y(le.$$.fragment,e),y(ne.$$.fragment,e),y(ie.$$.fragment,e),y(ce.$$.fragment,e),y(fe.$$.fragment,e),y(he.$$.fragment,e),y(z.$$.fragment,e),y(_e.$$.fragment,e),y(H.$$.fragment,e),xa=!1},d(e){a(c),e&&a(b),e&&a(u),E(d),e&&a(A),E(q,e),e&&a(J),e&&a(be),e&&a(Xe),e&&a(C),e&&a(Ze),E(R,e),e&&a(ea),e&&a(O),E(Z),e&&a(aa),e&&a($e),e&&a(ta),E(ee,e),e&&a(sa),e&&a(ve),e&&a(la),E(ae,e),e&&a(na),e&&a(T),e&&a(ra),E(te,e),e&&a(oa),e&&a(we),e&&a(ia),E(se,e),e&&a(pa),e&&a(S),e&&a(ca),e&&a(N),E(le),e&&a(fa),e&&a(je),e&&a(ha),E(ne,e),e&&a(ua),e&&a(ke),e&&a(ma),e&&a(L),e&&a(da),E(ie,e),e&&a(_a),e&&a(P),e&&a(ga),E(ce,e),e&&a(ba),e&&a(B),E(fe),e&&a($a),e&&a(Y),e&&a(va),E(he,e),e&&a(wa),E(z,e),e&&a(ja),e&&a(xe),e&&a(ka),e&&a(D),e&&a(ya),E(_e,e),e&&a(Ea),E(H,e)}}}const Ds={local:"audio-classification",sections:[{local:"load-superb-dataset",title:"Load SUPERB 
dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-trainer",title:"Fine-tune with Trainer"}],title:"Audio classification"};function Fs(F,c,b){let{fw:u}=c;return F.$$set=_=>{"fw"in _&&b(0,u=_.fw)},[u]}class Us extends Es{constructor(c){super();xs(this,c,Fs,Ls,As,{fw:0})}}export{Us as default,Ds as metadata};
408
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/tasks/token_classification.mdx-5446e455.js
import{S as Eo,i as yo,s as To,e as o,k as c,w as g,t as a,M as zo,c as r,d as e,m as h,a as i,x as j,h as n,b as f,F as t,g as p,y as w,q as k,o as b,B as $}from"../../chunks/vendor-4833417e.js";import{T as pa}from"../../chunks/Tip-fffd6df1.js";import{Y as xo}from"../../chunks/Youtube-27813aed.js";import{I as Xt}from"../../chunks/IconCopyLink-4b81c553.js";import{C}from"../../chunks/CodeBlock-6a3d1b46.js";import{C as qo}from"../../chunks/CodeBlockFw-27a176a0.js";import"../../chunks/CopyButton-dacfbfaf.js";function Co(P){let m,x,d,_,y;return{c(){m=o("p"),x=a("See the token classification "),d=o("a"),_=a("task page"),y=a(" for more information about other forms of token classification and their associated models, datasets, and metrics."),this.h()},l(u){m=r(u,"P",{});var v=i(m);x=n(v,"See the token classification "),d=r(v,"A",{href:!0,rel:!0});var T=i(d);_=n(T,"task page"),T.forEach(e),y=n(v," for more information about other forms of token classification and their associated models, datasets, and metrics."),v.forEach(e),this.h()},h(){f(d,"href","https://huggingface.co/tasks/token-classification"),f(d,"rel","nofollow")},m(u,v){p(u,m,v),t(m,x),t(m,d),t(d,_),t(m,y)},d(u){u&&e(m)}}}function Ao(P){let m,x,d,_,y,u,v,T;return{c(){m=o("p"),x=a("If you aren\u2019t familiar with fine-tuning a model with the "),d=o("a"),_=a("Trainer"),y=a(", take a look at the basic tutorial "),u=o("a"),v=a("here"),T=a("!"),this.h()},l(z){m=r(z,"P",{});var E=i(m);x=n(E,"If you aren\u2019t familiar with fine-tuning a model with the "),d=r(E,"A",{href:!0});var A=i(d);_=n(A,"Trainer"),A.forEach(e),y=n(E,", take a look at the basic tutorial "),u=r(E,"A",{href:!0});var O=i(u);v=n(O,"here"),O.forEach(e),T=n(E,"!"),E.forEach(e),this.h()},h(){f(d,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(u,"href","training#finetune-with-trainer")},m(z,E){p(z,m,E),t(m,x),t(m,d),t(d,_),t(m,y),t(m,u),t(u,v),t(m,T)},d(z){z&&e(m)}}}function Do(P){let 
m,x,d,_,y;return{c(){m=o("p"),x=a("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),d=o("a"),_=a("here"),y=a("!"),this.h()},l(u){m=r(u,"P",{});var v=i(m);x=n(v,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),d=r(v,"A",{href:!0});var T=i(d);_=n(T,"here"),T.forEach(e),y=n(v,"!"),v.forEach(e),this.h()},h(){f(d,"href","training#finetune-with-keras")},m(u,v){p(u,m,v),t(m,x),t(m,d),t(d,_),t(m,y)},d(u){u&&e(m)}}}function Fo(P){let m,x,d,_,y,u,v,T;return{c(){m=o("p"),x=a(`For a more in-depth example of how to fine-tune a model for token classification, take a look at the corresponding `),d=o("a"),_=a("PyTorch notebook"),y=a(` or `),u=o("a"),v=a("TensorFlow notebook"),T=a("."),this.h()},l(z){m=r(z,"P",{});var E=i(m);x=n(E,`For a more in-depth example of how to fine-tune a model for token classification, take a look at the corresponding `),d=r(E,"A",{href:!0,rel:!0});var A=i(d);_=n(A,"PyTorch notebook"),A.forEach(e),y=n(E,` or `),u=r(E,"A",{href:!0,rel:!0});var O=i(u);v=n(O,"TensorFlow notebook"),O.forEach(e),T=n(E,"."),E.forEach(e),this.h()},h(){f(d,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification.ipynb"),f(d,"rel","nofollow"),f(u,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb"),f(u,"rel","nofollow")},m(z,E){p(z,m,E),t(m,x),t(m,d),t(d,_),t(m,y),t(m,u),t(u,v),t(m,T)},d(z){z&&e(m)}}}function Po(P){let 
m,x,d,_,y,u,v,T,z,E,A,O,Ks,ca,se,L,ha,ms,fa,ma,ds,da,ua,te,Z,ee,H,J,dt,us,_a,ut,ga,ae,Vs,ja,ne,_s,le,Zs,wa,oe,gs,re,G,ka,_t,ba,$a,ie,js,pe,N,va,gt,xa,Ea,jt,ya,Ta,ce,I,Js,wt,za,qa,Ca,B,kt,Aa,Da,bt,Fa,Pa,$t,Sa,Oa,La,Gs,vt,Na,Ia,he,Y,Q,xt,ws,Ba,Et,Ma,fe,ks,me,X,Ra,yt,Ua,Wa,de,bs,ue,ss,Ha,Tt,Ya,Ka,_e,$s,ge,M,Va,zt,Za,Ja,qt,Ga,Qa,je,R,vs,Xa,xs,Ct,sn,tn,en,S,an,At,nn,ln,Dt,on,rn,Ft,pn,cn,hn,Es,fn,Pt,mn,dn,we,Qs,un,ke,ys,be,D,_n,Ts,St,gn,jn,Ot,wn,kn,Lt,bn,$n,$e,zs,ve,q,vn,Xs,xn,En,Nt,yn,Tn,It,zn,qn,Bt,Cn,An,xe,qs,Ee,K,ts,Mt,Cs,Dn,Rt,Fn,ye,es,Pn,st,Sn,On,Te,As,ze,as,qe,tt,Ln,Ce,U,Ds,Nn,et,In,Bn,Mn,Fs,Rn,at,Un,Wn,Hn,Ps,Yn,nt,Kn,Vn,Ae,Ss,De,V,ns,Ut,Os,Zn,Wt,Jn,Fe,lt,Gn,Pe,ls,Se,F,Qn,Ht,Xn,sl,Ls,Yt,tl,el,Kt,al,nl,Oe,Ns,Le,ot,ll,Ne,Is,Ie,os,ol,rt,rl,il,Be,Bs,Me,rs,pl,Ms,Vt,cl,hl,Re,Rs,Ue,is,fl,Us,Zt,ml,dl,We,Ws,He,ps,Ye;return u=new Xt({}),A=new xo({props:{id:"wVHdVlPScxA"}}),Z=new pa({props:{$$slots:{default:[Co]},$$scope:{ctx:P}}}),us=new Xt({}),_s=new C({props:{code:`from datasets import load_dataset wnut = load_dataset("wnut_17")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>wnut = load_dataset(<span class="hljs-string">&quot;wnut_17&quot;</span>)`}}),gs=new C({props:{code:'wnut["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>wnut[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;0&#x27;</span>, <span class="hljs-string">&#x27;ner_tags&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span 
class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">8</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;tokens&#x27;</span>: [<span class="hljs-string">&#x27;@paulwalk&#x27;</span>, <span class="hljs-string">&#x27;It&#x27;</span>, <span class="hljs-string">&quot;&#x27;s&quot;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;I&#x27;</span>, <span class="hljs-string">&quot;&#x27;m&quot;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Empire&#x27;</span>, <span class="hljs-string">&#x27;State&#x27;</span>, <span class="hljs-string">&#x27;Building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;ESB&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span 
class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>] }`}}),js=new C({props:{code:`label_list = wnut["train"].features[f"ner_tags"].feature.names label_list`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>label_list = wnut[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">f&quot;ner_tags&quot;</span>].feature.names <span class="hljs-meta">&gt;&gt;&gt; </span>label_list [ <span class="hljs-string">&quot;O&quot;</span>, <span class="hljs-string">&quot;B-corporation&quot;</span>, <span class="hljs-string">&quot;I-corporation&quot;</span>, <span class="hljs-string">&quot;B-creative-work&quot;</span>, <span class="hljs-string">&quot;I-creative-work&quot;</span>, <span class="hljs-string">&quot;B-group&quot;</span>, <span class="hljs-string">&quot;I-group&quot;</span>, <span class="hljs-string">&quot;B-location&quot;</span>, <span class="hljs-string">&quot;I-location&quot;</span>, <span class="hljs-string">&quot;B-person&quot;</span>, <span class="hljs-string">&quot;I-person&quot;</span>, <span class="hljs-string">&quot;B-product&quot;</span>, <span class="hljs-string">&quot;I-product&quot;</span>, ]`}}),ws=new Xt({}),ks=new xo({props:{id:"iY2AZYdZAr0"}}),bs=new C({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),$s=new C({props:{code:`tokenized_input = tokenizer(example["tokens"], is_split_into_words=True) tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"]) tokens`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_input = tokenizer(example[<span 
class="hljs-string">&quot;tokens&quot;</span>], is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens = tokenizer.convert_ids_to_tokens(tokenized_input[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens [<span class="hljs-string">&#x27;[CLS]&#x27;</span>, <span class="hljs-string">&#x27;@&#x27;</span>, <span class="hljs-string">&#x27;paul&#x27;</span>, <span class="hljs-string">&#x27;##walk&#x27;</span>, <span class="hljs-string">&#x27;it&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;s&#x27;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;i&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;m&#x27;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;empire&#x27;</span>, <span class="hljs-string">&#x27;state&#x27;</span>, <span class="hljs-string">&#x27;building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;es&#x27;</span>, <span class="hljs-string">&#x27;##b&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span 
class="hljs-string">&#x27;[SEP]&#x27;</span>]`}}),ys=new C({props:{code:`def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True) labels = [] for i, label in enumerate(examples[f"ner_tags"]): word_ids = tokenized_inputs.word_ids(batch_index=i) # Map tokens to their respective word. previous_word_idx = None label_ids = [] for word_idx in word_ids: # Set the special tokens to -100. if word_idx is None: label_ids.append(-100) elif word_idx != previous_word_idx: # Only label the first token of a given word. label_ids.append(label[word_idx]) else: label_ids.append(-100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">tokenize_and_align_labels</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> tokenized_inputs = tokenizer(examples[<span class="hljs-string">&quot;tokens&quot;</span>], truncation=<span class="hljs-literal">True</span>, is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> labels = [] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(examples[<span class="hljs-string">f&quot;ner_tags&quot;</span>]): <span class="hljs-meta">... </span> word_ids = tokenized_inputs.word_ids(batch_index=i) <span class="hljs-comment"># Map tokens to their respective word.</span> <span class="hljs-meta">... </span> previous_word_idx = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> label_ids = [] <span class="hljs-meta">... 
</span> <span class="hljs-keyword">for</span> word_idx <span class="hljs-keyword">in</span> word_ids: <span class="hljs-comment"># Set the special tokens to -100.</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> word_idx <span class="hljs-keyword">is</span> <span class="hljs-literal">None</span>: <span class="hljs-meta">... </span> label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">elif</span> word_idx != previous_word_idx: <span class="hljs-comment"># Only label the first token of a given word.</span> <span class="hljs-meta">... </span> label_ids.append(label[word_idx]) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> previous_word_idx = word_idx <span class="hljs-meta">... </span> labels.append(label_ids) <span class="hljs-meta">... </span> tokenized_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> tokenized_inputs`}}),zs=new C({props:{code:"tokenized_wnut = wnut.map(tokenize_and_align_labels, batched=True)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_wnut = wnut.<span class="hljs-built_in">map</span>(tokenize_and_align_labels, batched=<span class="hljs-literal">True</span>)'}}),qs=new qo({props:{group1:{id:"pt",code:`from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)`},group2:{id:"tf",code:`from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}}),Cs=new Xt({}),As=new C({props:{code:`from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer model = AutoModelForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=14)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForTokenClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span 
class="hljs-number">14</span>)`}}),as=new pa({props:{$$slots:{default:[Ao]},$$scope:{ctx:P}}}),Ss=new C({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=3, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_wnut["train"], eval_dataset=tokenized_wnut["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_wnut[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),Os=new Xt({}),ls=new pa({props:{$$slots:{default:[Do]},$$scope:{ctx:P}}}),Ns=new C({props:{code:`tf_train_set = tokenized_wnut["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_validation_set = tokenized_wnut["validation"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = tokenized_wnut[<span class="hljs-string">&quot;validation&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)`}}),Is=new C({props:{code:`from transformers import create_optimizer batch_size = 16 num_train_epochs = 3 num_train_steps = (len(tokenized_wnut["train"]) // batch_size) * num_train_epochs optimizer, lr_schedule = create_optimizer( init_lr=2e-5, num_train_steps=num_train_steps, weight_decay_rate=0.01, num_warmup_steps=0, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_epochs = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_steps = (<span class="hljs-built_in">len</span>(tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_train_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, lr_schedule = create_optimizer( <span class="hljs-meta">... </span> init_lr=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_train_steps=num_train_steps, <span class="hljs-meta">... </span> weight_decay_rate=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> num_warmup_steps=<span class="hljs-number">0</span>, <span class="hljs-meta">... 
</span>)`}}),Bs=new C({props:{code:`from transformers import TFAutoModelForTokenClassification model = TFAutoModelForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)`}}),Rs=new C({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),Ws=new C({props:{code:"model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)'}}),ps=new pa({props:{$$slots:{default:[Fo]},$$scope:{ctx:P}}}),{c(){m=o("meta"),x=c(),d=o("h1"),_=o("a"),y=o("span"),g(u.$$.fragment),v=c(),T=o("span"),z=a("Token classification"),E=c(),g(A.$$.fragment),O=c(),Ks=o("p"),ca=a("Token classification assigns a label to individual tokens in a sentence. One of the most common token classification tasks is Named Entity Recognition (NER). 
NER attempts to find a label for each entity in a sentence, such as a person, location, or organization."),se=c(),L=o("p"),ha=a("This guide will show you how to fine-tune "),ms=o("a"),fa=a("DistilBERT"),ma=a(" on the "),ds=o("a"),da=a("WNUT 17"),ua=a(" dataset to detect new entities."),te=c(),g(Z.$$.fragment),ee=c(),H=o("h2"),J=o("a"),dt=o("span"),g(us.$$.fragment),_a=c(),ut=o("span"),ga=a("Load WNUT 17 dataset"),ae=c(),Vs=o("p"),ja=a("Load the WNUT 17 dataset from the \u{1F917} Datasets library:"),ne=c(),g(_s.$$.fragment),le=c(),Zs=o("p"),wa=a("Then take a look at an example:"),oe=c(),g(gs.$$.fragment),re=c(),G=o("p"),ka=a("Each number in "),_t=o("code"),ba=a("ner_tags"),$a=a(" represents an entity. Convert the number to a label name for more information:"),ie=c(),g(js.$$.fragment),pe=c(),N=o("p"),va=a("The "),gt=o("code"),xa=a("ner_tag"),Ea=a(" describes an entity, such as a corporation, location, or person. The letter that prefixes each "),jt=o("code"),ya=a("ner_tag"),Ta=a(" indicates the token position of the entity:"),ce=c(),I=o("ul"),Js=o("li"),wt=o("code"),za=a("B-"),qa=a(" indicates the beginning of an entity."),Ca=c(),B=o("li"),kt=o("code"),Aa=a("I-"),Da=a(" indicates a token is contained inside the same entity (e.g., the "),bt=o("code"),Fa=a("State"),Pa=a(` token is a part of an entity like `),$t=o("code"),Sa=a("Empire State Building"),Oa=a(")."),La=c(),Gs=o("li"),vt=o("code"),Na=a("0"),Ia=a(" indicates the token doesn\u2019t correspond to any entity."),he=c(),Y=o("h2"),Q=o("a"),xt=o("span"),g(ws.$$.fragment),Ba=c(),Et=o("span"),Ma=a("Preprocess"),fe=c(),g(ks.$$.fragment),me=c(),X=o("p"),Ra=a("Load the DistilBERT tokenizer to process the "),yt=o("code"),Ua=a("tokens"),Wa=a(":"),de=c(),g(bs.$$.fragment),ue=c(),ss=o("p"),Ha=a("Since the input has already been split into words, set "),Tt=o("code"),Ya=a("is_split_into_words=True"),Ka=a(" to tokenize the words into subwords:"),_e=c(),g($s.$$.fragment),ge=c(),M=o("p"),Va=a("Adding the special tokens 
"),zt=o("code"),Za=a("[CLS]"),Ja=a(" and "),qt=o("code"),Ga=a("[SEP]"),Qa=a(" and subword tokenization creates a mismatch between the input and labels. A single word corresponding to a single label may be split into two subwords. You will need to realign the tokens and labels by:"),je=c(),R=o("ol"),vs=o("li"),Xa=a("Mapping all tokens to their corresponding word with the "),xs=o("a"),Ct=o("code"),sn=a("word_ids"),tn=a(" method."),en=c(),S=o("li"),an=a("Assigning the label "),At=o("code"),nn=a("-100"),ln=a(" to the special tokens "),Dt=o("code"),on=a("[CLS]"),rn=a(" and "),Ft=o("code"),pn=a("[SEP]"),cn=a(` so the PyTorch loss function ignores them.`),hn=c(),Es=o("li"),fn=a("Only labeling the first token of a given word. Assign "),Pt=o("code"),mn=a("-100"),dn=a(" to other subtokens from the same word."),we=c(),Qs=o("p"),un=a("Here is how you can create a function to realign the tokens and labels, and truncate sequences to be no longer than DistilBERT\u2019s maximum input length::"),ke=c(),g(ys.$$.fragment),be=c(),D=o("p"),_n=a("Use \u{1F917} Datasets "),Ts=o("a"),St=o("code"),gn=a("map"),jn=a(" function to tokenize and align the labels over the entire dataset. You can speed up the "),Ot=o("code"),wn=a("map"),kn=a(" function by setting "),Lt=o("code"),bn=a("batched=True"),$n=a(" to process multiple elements of the dataset at once:"),$e=c(),g(zs.$$.fragment),ve=c(),q=o("p"),vn=a("Use "),Xs=o("a"),xn=a("DataCollatorForTokenClassification"),En=a(" to create a batch of examples. It will also "),Nt=o("em"),yn=a("dynamically pad"),Tn=a(" your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the "),It=o("code"),zn=a("tokenizer"),qn=a(" function by setting "),Bt=o("code"),Cn=a("padding=True"),An=a(", dynamic padding is more efficient."),xe=c(),g(qs.$$.fragment),Ee=c(),K=o("h2"),ts=o("a"),Mt=o("span"),g(Cs.$$.fragment),Dn=c(),Rt=o("span"),Fn=a("Fine-tune with Trainer"),ye=c(),es=o("p"),Pn=a("Load DistilBERT with "),st=o("a"),Sn=a("AutoModelForTokenClassification"),On=a(" along with the number of expected labels:"),Te=c(),g(As.$$.fragment),ze=c(),g(as.$$.fragment),qe=c(),tt=o("p"),Ln=a("At this point, only three steps remain:"),Ce=c(),U=o("ol"),Ds=o("li"),Nn=a("Define your training hyperparameters in "),et=o("a"),In=a("TrainingArguments"),Bn=a("."),Mn=c(),Fs=o("li"),Rn=a("Pass the training arguments to "),at=o("a"),Un=a("Trainer"),Wn=a(" along with the model, dataset, tokenizer, and data collator."),Hn=c(),Ps=o("li"),Yn=a("Call "),nt=o("a"),Kn=a("train()"),Vn=a(" to fine-tune your model."),Ae=c(),g(Ss.$$.fragment),De=c(),V=o("h2"),ns=o("a"),Ut=o("span"),g(Os.$$.fragment),Zn=c(),Wt=o("span"),Jn=a("Fine-tune with TensorFlow"),Fe=c(),lt=o("p"),Gn=a("To fine-tune a model in TensorFlow is just as easy, with only a few differences."),Pe=c(),g(ls.$$.fragment),Se=c(),F=o("p"),Qn=a("Convert your datasets to the "),Ht=o("code"),Xn=a("tf.data.Dataset"),sl=a(" format with "),Ls=o("a"),Yt=o("code"),tl=a("to_tf_dataset"),el=a(". 
Specify inputs and labels in "),Kt=o("code"),al=a("columns"),nl=a(", whether to shuffle the dataset order, batch size, and the data collator:"),Oe=c(),g(Ns.$$.fragment),Le=c(),ot=o("p"),ll=a("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),Ne=c(),g(Is.$$.fragment),Ie=c(),os=o("p"),ol=a("Load DistilBERT with "),rt=o("a"),rl=a("TFAutoModelForTokenClassification"),il=a(" along with the number of expected labels:"),Be=c(),g(Bs.$$.fragment),Me=c(),rs=o("p"),pl=a("Configure the model for training with "),Ms=o("a"),Vt=o("code"),cl=a("compile"),hl=a(":"),Re=c(),g(Rs.$$.fragment),Ue=c(),is=o("p"),fl=a("Call "),Us=o("a"),Zt=o("code"),ml=a("fit"),dl=a(" to fine-tune the model:"),We=c(),g(Ws.$$.fragment),He=c(),g(ps.$$.fragment),this.h()},l(s){const l=zo('[data-svelte="svelte-1phssyn"]',document.head);m=r(l,"META",{name:!0,content:!0}),l.forEach(e),x=h(s),d=r(s,"H1",{class:!0});var Hs=i(d);_=r(Hs,"A",{id:!0,class:!0,href:!0});var Jt=i(_);y=r(Jt,"SPAN",{});var Gt=i(y);j(u.$$.fragment,Gt),Gt.forEach(e),Jt.forEach(e),v=h(Hs),T=r(Hs,"SPAN",{});var Qt=i(T);z=n(Qt,"Token classification"),Qt.forEach(e),Hs.forEach(e),E=h(s),j(A.$$.fragment,s),O=h(s),Ks=r(s,"P",{});var gl=i(Ks);ca=n(gl,"Token classification assigns a label to individual tokens in a sentence. One of the most common token classification tasks is Named Entity Recognition (NER). 
NER attempts to find a label for each entity in a sentence, such as a person, location, or organization."),gl.forEach(e),se=h(s),L=r(s,"P",{});var it=i(L);ha=n(it,"This guide will show you how to fine-tune "),ms=r(it,"A",{href:!0,rel:!0});var jl=i(ms);fa=n(jl,"DistilBERT"),jl.forEach(e),ma=n(it," on the "),ds=r(it,"A",{href:!0,rel:!0});var wl=i(ds);da=n(wl,"WNUT 17"),wl.forEach(e),ua=n(it," dataset to detect new entities."),it.forEach(e),te=h(s),j(Z.$$.fragment,s),ee=h(s),H=r(s,"H2",{class:!0});var Ke=i(H);J=r(Ke,"A",{id:!0,class:!0,href:!0});var kl=i(J);dt=r(kl,"SPAN",{});var bl=i(dt);j(us.$$.fragment,bl),bl.forEach(e),kl.forEach(e),_a=h(Ke),ut=r(Ke,"SPAN",{});var $l=i(ut);ga=n($l,"Load WNUT 17 dataset"),$l.forEach(e),Ke.forEach(e),ae=h(s),Vs=r(s,"P",{});var vl=i(Vs);ja=n(vl,"Load the WNUT 17 dataset from the \u{1F917} Datasets library:"),vl.forEach(e),ne=h(s),j(_s.$$.fragment,s),le=h(s),Zs=r(s,"P",{});var xl=i(Zs);wa=n(xl,"Then take a look at an example:"),xl.forEach(e),oe=h(s),j(gs.$$.fragment,s),re=h(s),G=r(s,"P",{});var Ve=i(G);ka=n(Ve,"Each number in "),_t=r(Ve,"CODE",{});var El=i(_t);ba=n(El,"ner_tags"),El.forEach(e),$a=n(Ve," represents an entity. Convert the number to a label name for more information:"),Ve.forEach(e),ie=h(s),j(js.$$.fragment,s),pe=h(s),N=r(s,"P",{});var pt=i(N);va=n(pt,"The "),gt=r(pt,"CODE",{});var yl=i(gt);xa=n(yl,"ner_tag"),yl.forEach(e),Ea=n(pt," describes an entity, such as a corporation, location, or person. 
The letter that prefixes each "),jt=r(pt,"CODE",{});var Tl=i(jt);ya=n(Tl,"ner_tag"),Tl.forEach(e),Ta=n(pt," indicates the token position of the entity:"),pt.forEach(e),ce=h(s),I=r(s,"UL",{});var ct=i(I);Js=r(ct,"LI",{});var ul=i(Js);wt=r(ul,"CODE",{});var zl=i(wt);za=n(zl,"B-"),zl.forEach(e),qa=n(ul," indicates the beginning of an entity."),ul.forEach(e),Ca=h(ct),B=r(ct,"LI",{});var Ys=i(B);kt=r(Ys,"CODE",{});var ql=i(kt);Aa=n(ql,"I-"),ql.forEach(e),Da=n(Ys," indicates a token is contained inside the same entity (e.g., the "),bt=r(Ys,"CODE",{});var Cl=i(bt);Fa=n(Cl,"State"),Cl.forEach(e),Pa=n(Ys,` token is a part of an entity like `),$t=r(Ys,"CODE",{});var Al=i($t);Sa=n(Al,"Empire State Building"),Al.forEach(e),Oa=n(Ys,")."),Ys.forEach(e),La=h(ct),Gs=r(ct,"LI",{});var _l=i(Gs);vt=r(_l,"CODE",{});var Dl=i(vt);Na=n(Dl,"0"),Dl.forEach(e),Ia=n(_l," indicates the token doesn\u2019t correspond to any entity."),_l.forEach(e),ct.forEach(e),he=h(s),Y=r(s,"H2",{class:!0});var Ze=i(Y);Q=r(Ze,"A",{id:!0,class:!0,href:!0});var Fl=i(Q);xt=r(Fl,"SPAN",{});var Pl=i(xt);j(ws.$$.fragment,Pl),Pl.forEach(e),Fl.forEach(e),Ba=h(Ze),Et=r(Ze,"SPAN",{});var Sl=i(Et);Ma=n(Sl,"Preprocess"),Sl.forEach(e),Ze.forEach(e),fe=h(s),j(ks.$$.fragment,s),me=h(s),X=r(s,"P",{});var Je=i(X);Ra=n(Je,"Load the DistilBERT tokenizer to process the "),yt=r(Je,"CODE",{});var Ol=i(yt);Ua=n(Ol,"tokens"),Ol.forEach(e),Wa=n(Je,":"),Je.forEach(e),de=h(s),j(bs.$$.fragment,s),ue=h(s),ss=r(s,"P",{});var Ge=i(ss);Ha=n(Ge,"Since the input has already been split into words, set "),Tt=r(Ge,"CODE",{});var Ll=i(Tt);Ya=n(Ll,"is_split_into_words=True"),Ll.forEach(e),Ka=n(Ge," to tokenize the words into subwords:"),Ge.forEach(e),_e=h(s),j($s.$$.fragment,s),ge=h(s),M=r(s,"P",{});var ht=i(M);Va=n(ht,"Adding the special tokens "),zt=r(ht,"CODE",{});var Nl=i(zt);Za=n(Nl,"[CLS]"),Nl.forEach(e),Ja=n(ht," and "),qt=r(ht,"CODE",{});var Il=i(qt);Ga=n(Il,"[SEP]"),Il.forEach(e),Qa=n(ht," and subword tokenization creates a mismatch 
between the input and labels. A single word corresponding to a single label may be split into two subwords. You will need to realign the tokens and labels by:"),ht.forEach(e),je=h(s),R=r(s,"OL",{});var ft=i(R);vs=r(ft,"LI",{});var Qe=i(vs);Xa=n(Qe,"Mapping all tokens to their corresponding word with the "),xs=r(Qe,"A",{href:!0,rel:!0});var Bl=i(xs);Ct=r(Bl,"CODE",{});var Ml=i(Ct);sn=n(Ml,"word_ids"),Ml.forEach(e),Bl.forEach(e),tn=n(Qe," method."),Qe.forEach(e),en=h(ft),S=r(ft,"LI",{});var cs=i(S);an=n(cs,"Assigning the label "),At=r(cs,"CODE",{});var Rl=i(At);nn=n(Rl,"-100"),Rl.forEach(e),ln=n(cs," to the special tokens "),Dt=r(cs,"CODE",{});var Ul=i(Dt);on=n(Ul,"[CLS]"),Ul.forEach(e),rn=n(cs," and "),Ft=r(cs,"CODE",{});var Wl=i(Ft);pn=n(Wl,"[SEP]"),Wl.forEach(e),cn=n(cs,` so the PyTorch loss function ignores them.`),cs.forEach(e),hn=h(ft),Es=r(ft,"LI",{});var Xe=i(Es);fn=n(Xe,"Only labeling the first token of a given word. Assign "),Pt=r(Xe,"CODE",{});var Hl=i(Pt);mn=n(Hl,"-100"),Hl.forEach(e),dn=n(Xe," to other subtokens from the same word."),Xe.forEach(e),ft.forEach(e),we=h(s),Qs=r(s,"P",{});var Yl=i(Qs);un=n(Yl,"Here is how you can create a function to realign the tokens and labels, and truncate sequences to be no longer than DistilBERT\u2019s maximum input length::"),Yl.forEach(e),ke=h(s),j(ys.$$.fragment,s),be=h(s),D=r(s,"P",{});var hs=i(D);_n=n(hs,"Use \u{1F917} Datasets "),Ts=r(hs,"A",{href:!0,rel:!0});var Kl=i(Ts);St=r(Kl,"CODE",{});var Vl=i(St);gn=n(Vl,"map"),Vl.forEach(e),Kl.forEach(e),jn=n(hs," function to tokenize and align the labels over the entire dataset. 
You can speed up the "),Ot=r(hs,"CODE",{});var Zl=i(Ot);wn=n(Zl,"map"),Zl.forEach(e),kn=n(hs," function by setting "),Lt=r(hs,"CODE",{});var Jl=i(Lt);bn=n(Jl,"batched=True"),Jl.forEach(e),$n=n(hs," to process multiple elements of the dataset at once:"),hs.forEach(e),$e=h(s),j(zs.$$.fragment,s),ve=h(s),q=r(s,"P",{});var W=i(q);vn=n(W,"Use "),Xs=r(W,"A",{href:!0});var Gl=i(Xs);xn=n(Gl,"DataCollatorForTokenClassification"),Gl.forEach(e),En=n(W," to create a batch of examples. It will also "),Nt=r(W,"EM",{});var Ql=i(Nt);yn=n(Ql,"dynamically pad"),Ql.forEach(e),Tn=n(W," your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),It=r(W,"CODE",{});var Xl=i(It);zn=n(Xl,"tokenizer"),Xl.forEach(e),qn=n(W," function by setting "),Bt=r(W,"CODE",{});var so=i(Bt);Cn=n(so,"padding=True"),so.forEach(e),An=n(W,", dynamic padding is more efficient."),W.forEach(e),xe=h(s),j(qs.$$.fragment,s),Ee=h(s),K=r(s,"H2",{class:!0});var sa=i(K);ts=r(sa,"A",{id:!0,class:!0,href:!0});var to=i(ts);Mt=r(to,"SPAN",{});var eo=i(Mt);j(Cs.$$.fragment,eo),eo.forEach(e),to.forEach(e),Dn=h(sa),Rt=r(sa,"SPAN",{});var ao=i(Rt);Fn=n(ao,"Fine-tune with Trainer"),ao.forEach(e),sa.forEach(e),ye=h(s),es=r(s,"P",{});var ta=i(es);Pn=n(ta,"Load DistilBERT with "),st=r(ta,"A",{href:!0});var no=i(st);Sn=n(no,"AutoModelForTokenClassification"),no.forEach(e),On=n(ta," along with the number of expected labels:"),ta.forEach(e),Te=h(s),j(As.$$.fragment,s),ze=h(s),j(as.$$.fragment,s),qe=h(s),tt=r(s,"P",{});var lo=i(tt);Ln=n(lo,"At this point, only three steps remain:"),lo.forEach(e),Ce=h(s),U=r(s,"OL",{});var mt=i(U);Ds=r(mt,"LI",{});var ea=i(Ds);Nn=n(ea,"Define your training hyperparameters in "),et=r(ea,"A",{href:!0});var oo=i(et);In=n(oo,"TrainingArguments"),oo.forEach(e),Bn=n(ea,"."),ea.forEach(e),Mn=h(mt),Fs=r(mt,"LI",{});var aa=i(Fs);Rn=n(aa,"Pass the training arguments to "),at=r(aa,"A",{href:!0});var 
ro=i(at);Un=n(ro,"Trainer"),ro.forEach(e),Wn=n(aa," along with the model, dataset, tokenizer, and data collator."),aa.forEach(e),Hn=h(mt),Ps=r(mt,"LI",{});var na=i(Ps);Yn=n(na,"Call "),nt=r(na,"A",{href:!0});var io=i(nt);Kn=n(io,"train()"),io.forEach(e),Vn=n(na," to fine-tune your model."),na.forEach(e),mt.forEach(e),Ae=h(s),j(Ss.$$.fragment,s),De=h(s),V=r(s,"H2",{class:!0});var la=i(V);ns=r(la,"A",{id:!0,class:!0,href:!0});var po=i(ns);Ut=r(po,"SPAN",{});var co=i(Ut);j(Os.$$.fragment,co),co.forEach(e),po.forEach(e),Zn=h(la),Wt=r(la,"SPAN",{});var ho=i(Wt);Jn=n(ho,"Fine-tune with TensorFlow"),ho.forEach(e),la.forEach(e),Fe=h(s),lt=r(s,"P",{});var fo=i(lt);Gn=n(fo,"To fine-tune a model in TensorFlow is just as easy, with only a few differences."),fo.forEach(e),Pe=h(s),j(ls.$$.fragment,s),Se=h(s),F=r(s,"P",{});var fs=i(F);Qn=n(fs,"Convert your datasets to the "),Ht=r(fs,"CODE",{});var mo=i(Ht);Xn=n(mo,"tf.data.Dataset"),mo.forEach(e),sl=n(fs," format with "),Ls=r(fs,"A",{href:!0,rel:!0});var uo=i(Ls);Yt=r(uo,"CODE",{});var _o=i(Yt);tl=n(_o,"to_tf_dataset"),_o.forEach(e),uo.forEach(e),el=n(fs,". 
Specify inputs and labels in "),Kt=r(fs,"CODE",{});var go=i(Kt);al=n(go,"columns"),go.forEach(e),nl=n(fs,", whether to shuffle the dataset order, batch size, and the data collator:"),fs.forEach(e),Oe=h(s),j(Ns.$$.fragment,s),Le=h(s),ot=r(s,"P",{});var jo=i(ot);ll=n(jo,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),jo.forEach(e),Ne=h(s),j(Is.$$.fragment,s),Ie=h(s),os=r(s,"P",{});var oa=i(os);ol=n(oa,"Load DistilBERT with "),rt=r(oa,"A",{href:!0});var wo=i(rt);rl=n(wo,"TFAutoModelForTokenClassification"),wo.forEach(e),il=n(oa," along with the number of expected labels:"),oa.forEach(e),Be=h(s),j(Bs.$$.fragment,s),Me=h(s),rs=r(s,"P",{});var ra=i(rs);pl=n(ra,"Configure the model for training with "),Ms=r(ra,"A",{href:!0,rel:!0});var ko=i(Ms);Vt=r(ko,"CODE",{});var bo=i(Vt);cl=n(bo,"compile"),bo.forEach(e),ko.forEach(e),hl=n(ra,":"),ra.forEach(e),Re=h(s),j(Rs.$$.fragment,s),Ue=h(s),is=r(s,"P",{});var ia=i(is);fl=n(ia,"Call "),Us=r(ia,"A",{href:!0,rel:!0});var $o=i(Us);Zt=r($o,"CODE",{});var vo=i(Zt);ml=n(vo,"fit"),vo.forEach(e),$o.forEach(e),dl=n(ia," to fine-tune the model:"),ia.forEach(e),We=h(s),j(Ws.$$.fragment,s),He=h(s),j(ps.$$.fragment,s),this.h()},h(){f(m,"name","hf:doc:metadata"),f(m,"content",JSON.stringify(So)),f(_,"id","token-classification"),f(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(_,"href","#token-classification"),f(d,"class","relative group"),f(ms,"href","https://huggingface.co/distilbert-base-uncased"),f(ms,"rel","nofollow"),f(ds,"href","https://huggingface.co/datasets/wnut_17"),f(ds,"rel","nofollow"),f(J,"id","load-wnut-17-dataset"),f(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(J,"href","#load-wnut-17-dataset"),f(H,"class","relative 
group"),f(Q,"id","preprocess"),f(Q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Q,"href","#preprocess"),f(Y,"class","relative group"),f(xs,"href","https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.word_ids"),f(xs,"rel","nofollow"),f(Ts,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map"),f(Ts,"rel","nofollow"),f(Xs,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorForTokenClassification"),f(ts,"id","finetune-with-trainer"),f(ts,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ts,"href","#finetune-with-trainer"),f(K,"class","relative group"),f(st,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForTokenClassification"),f(et,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),f(at,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(nt,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train"),f(ns,"id","finetune-with-tensorflow"),f(ns,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(ns,"href","#finetune-with-tensorflow"),f(V,"class","relative 
group"),f(Ls,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),f(Ls,"rel","nofollow"),f(rt,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForTokenClassification"),f(Ms,"href","https://keras.io/api/models/model_training_apis/#compile-method"),f(Ms,"rel","nofollow"),f(Us,"href","https://keras.io/api/models/model_training_apis/#fit-method"),f(Us,"rel","nofollow")},m(s,l){t(document.head,m),p(s,x,l),p(s,d,l),t(d,_),t(_,y),w(u,y,null),t(d,v),t(d,T),t(T,z),p(s,E,l),w(A,s,l),p(s,O,l),p(s,Ks,l),t(Ks,ca),p(s,se,l),p(s,L,l),t(L,ha),t(L,ms),t(ms,fa),t(L,ma),t(L,ds),t(ds,da),t(L,ua),p(s,te,l),w(Z,s,l),p(s,ee,l),p(s,H,l),t(H,J),t(J,dt),w(us,dt,null),t(H,_a),t(H,ut),t(ut,ga),p(s,ae,l),p(s,Vs,l),t(Vs,ja),p(s,ne,l),w(_s,s,l),p(s,le,l),p(s,Zs,l),t(Zs,wa),p(s,oe,l),w(gs,s,l),p(s,re,l),p(s,G,l),t(G,ka),t(G,_t),t(_t,ba),t(G,$a),p(s,ie,l),w(js,s,l),p(s,pe,l),p(s,N,l),t(N,va),t(N,gt),t(gt,xa),t(N,Ea),t(N,jt),t(jt,ya),t(N,Ta),p(s,ce,l),p(s,I,l),t(I,Js),t(Js,wt),t(wt,za),t(Js,qa),t(I,Ca),t(I,B),t(B,kt),t(kt,Aa),t(B,Da),t(B,bt),t(bt,Fa),t(B,Pa),t(B,$t),t($t,Sa),t(B,Oa),t(I,La),t(I,Gs),t(Gs,vt),t(vt,Na),t(Gs,Ia),p(s,he,l),p(s,Y,l),t(Y,Q),t(Q,xt),w(ws,xt,null),t(Y,Ba),t(Y,Et),t(Et,Ma),p(s,fe,l),w(ks,s,l),p(s,me,l),p(s,X,l),t(X,Ra),t(X,yt),t(yt,Ua),t(X,Wa),p(s,de,l),w(bs,s,l),p(s,ue,l),p(s,ss,l),t(ss,Ha),t(ss,Tt),t(Tt,Ya),t(ss,Ka),p(s,_e,l),w($s,s,l),p(s,ge,l),p(s,M,l),t(M,Va),t(M,zt),t(zt,Za),t(M,Ja),t(M,qt),t(qt,Ga),t(M,Qa),p(s,je,l),p(s,R,l),t(R,vs),t(vs,Xa),t(vs,xs),t(xs,Ct),t(Ct,sn),t(vs,tn),t(R,en),t(R,S),t(S,an),t(S,At),t(At,nn),t(S,ln),t(S,Dt),t(Dt,on),t(S,rn),t(S,Ft),t(Ft,pn),t(S,cn),t(R,hn),t(R,Es),t(Es,fn),t(Es,Pt),t(Pt,mn),t(Es,dn),p(s,we,l),p(s,Qs,l),t(Qs,un),p(s,ke,l),w(ys,s,l),p(s,be,l),p(s,D,l),t(D,_n),t(D,Ts),t(Ts,St),t(St,gn),t(D,jn),t(D,Ot),t(Ot,wn),t(D,kn),t(D,Lt),t(Lt,bn),t(D,$n),p(s,$e,l),w(zs,s,l),p(s,ve,l),p(s,q,l),t(q,vn),t(q,Xs),t(Xs,xn),t(q,En),t(q,Nt),t(Nt,yn),t(q,Tn),t(q,
It),t(It,zn),t(q,qn),t(q,Bt),t(Bt,Cn),t(q,An),p(s,xe,l),w(qs,s,l),p(s,Ee,l),p(s,K,l),t(K,ts),t(ts,Mt),w(Cs,Mt,null),t(K,Dn),t(K,Rt),t(Rt,Fn),p(s,ye,l),p(s,es,l),t(es,Pn),t(es,st),t(st,Sn),t(es,On),p(s,Te,l),w(As,s,l),p(s,ze,l),w(as,s,l),p(s,qe,l),p(s,tt,l),t(tt,Ln),p(s,Ce,l),p(s,U,l),t(U,Ds),t(Ds,Nn),t(Ds,et),t(et,In),t(Ds,Bn),t(U,Mn),t(U,Fs),t(Fs,Rn),t(Fs,at),t(at,Un),t(Fs,Wn),t(U,Hn),t(U,Ps),t(Ps,Yn),t(Ps,nt),t(nt,Kn),t(Ps,Vn),p(s,Ae,l),w(Ss,s,l),p(s,De,l),p(s,V,l),t(V,ns),t(ns,Ut),w(Os,Ut,null),t(V,Zn),t(V,Wt),t(Wt,Jn),p(s,Fe,l),p(s,lt,l),t(lt,Gn),p(s,Pe,l),w(ls,s,l),p(s,Se,l),p(s,F,l),t(F,Qn),t(F,Ht),t(Ht,Xn),t(F,sl),t(F,Ls),t(Ls,Yt),t(Yt,tl),t(F,el),t(F,Kt),t(Kt,al),t(F,nl),p(s,Oe,l),w(Ns,s,l),p(s,Le,l),p(s,ot,l),t(ot,ll),p(s,Ne,l),w(Is,s,l),p(s,Ie,l),p(s,os,l),t(os,ol),t(os,rt),t(rt,rl),t(os,il),p(s,Be,l),w(Bs,s,l),p(s,Me,l),p(s,rs,l),t(rs,pl),t(rs,Ms),t(Ms,Vt),t(Vt,cl),t(rs,hl),p(s,Re,l),w(Rs,s,l),p(s,Ue,l),p(s,is,l),t(is,fl),t(is,Us),t(Us,Zt),t(Zt,ml),t(is,dl),p(s,We,l),w(Ws,s,l),p(s,He,l),w(ps,s,l),Ye=!0},p(s,[l]){const Hs={};l&2&&(Hs.$$scope={dirty:l,ctx:s}),Z.$set(Hs);const Jt={};l&2&&(Jt.$$scope={dirty:l,ctx:s}),as.$set(Jt);const Gt={};l&2&&(Gt.$$scope={dirty:l,ctx:s}),ls.$set(Gt);const 
Qt={};l&2&&(Qt.$$scope={dirty:l,ctx:s}),ps.$set(Qt)},i(s){Ye||(k(u.$$.fragment,s),k(A.$$.fragment,s),k(Z.$$.fragment,s),k(us.$$.fragment,s),k(_s.$$.fragment,s),k(gs.$$.fragment,s),k(js.$$.fragment,s),k(ws.$$.fragment,s),k(ks.$$.fragment,s),k(bs.$$.fragment,s),k($s.$$.fragment,s),k(ys.$$.fragment,s),k(zs.$$.fragment,s),k(qs.$$.fragment,s),k(Cs.$$.fragment,s),k(As.$$.fragment,s),k(as.$$.fragment,s),k(Ss.$$.fragment,s),k(Os.$$.fragment,s),k(ls.$$.fragment,s),k(Ns.$$.fragment,s),k(Is.$$.fragment,s),k(Bs.$$.fragment,s),k(Rs.$$.fragment,s),k(Ws.$$.fragment,s),k(ps.$$.fragment,s),Ye=!0)},o(s){b(u.$$.fragment,s),b(A.$$.fragment,s),b(Z.$$.fragment,s),b(us.$$.fragment,s),b(_s.$$.fragment,s),b(gs.$$.fragment,s),b(js.$$.fragment,s),b(ws.$$.fragment,s),b(ks.$$.fragment,s),b(bs.$$.fragment,s),b($s.$$.fragment,s),b(ys.$$.fragment,s),b(zs.$$.fragment,s),b(qs.$$.fragment,s),b(Cs.$$.fragment,s),b(As.$$.fragment,s),b(as.$$.fragment,s),b(Ss.$$.fragment,s),b(Os.$$.fragment,s),b(ls.$$.fragment,s),b(Ns.$$.fragment,s),b(Is.$$.fragment,s),b(Bs.$$.fragment,s),b(Rs.$$.fragment,s),b(Ws.$$.fragment,s),b(ps.$$.fragment,s),Ye=!1},d(s){e(m),s&&e(x),s&&e(d),$(u),s&&e(E),$(A,s),s&&e(O),s&&e(Ks),s&&e(se),s&&e(L),s&&e(te),$(Z,s),s&&e(ee),s&&e(H),$(us),s&&e(ae),s&&e(Vs),s&&e(ne),$(_s,s),s&&e(le),s&&e(Zs),s&&e(oe),$(gs,s),s&&e(re),s&&e(G),s&&e(ie),$(js,s),s&&e(pe),s&&e(N),s&&e(ce),s&&e(I),s&&e(he),s&&e(Y),$(ws),s&&e(fe),$(ks,s),s&&e(me),s&&e(X),s&&e(de),$(bs,s),s&&e(ue),s&&e(ss),s&&e(_e),$($s,s),s&&e(ge),s&&e(M),s&&e(je),s&&e(R),s&&e(we),s&&e(Qs),s&&e(ke),$(ys,s),s&&e(be),s&&e(D),s&&e($e),$(zs,s),s&&e(ve),s&&e(q),s&&e(xe),$(qs,s),s&&e(Ee),s&&e(K),$(Cs),s&&e(ye),s&&e(es),s&&e(Te),$(As,s),s&&e(ze),$(as,s),s&&e(qe),s&&e(tt),s&&e(Ce),s&&e(U),s&&e(Ae),$(Ss,s),s&&e(De),s&&e(V),$(Os),s&&e(Fe),s&&e(lt),s&&e(Pe),$(ls,s),s&&e(Se),s&&e(F),s&&e(Oe),$(Ns,s),s&&e(Le),s&&e(ot),s&&e(Ne),$(Is,s),s&&e(Ie),s&&e(os),s&&e(Be),$(Bs,s),s&&e(Me),s&&e(rs),s&&e(Re),$(Rs,s),s&&e(Ue),s&&e(is),s&&e(We),$(Ws,s),s&&e(He),$(ps,s)}}}co
nst So={local:"token-classification",sections:[{local:"load-wnut-17-dataset",title:"Load WNUT 17 dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-trainer",title:"Fine-tune with Trainer"},{local:"finetune-with-tensorflow",title:"Fine-tune with TensorFlow"}],title:"Token classification"};function Oo(P,m,x){let{fw:d}=m;return P.$$set=_=>{"fw"in _&&x(0,d=_.fw)},[d]}class Wo extends Eo{constructor(m){super();yo(this,m,Oo,Po,To,{fw:0})}}export{Wo as default,So as metadata};
409
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/tasks/multiple_choice.mdx-7c0ab1b6.js
import{S as ql,i as xl,s as Tl,e as o,k as c,w as u,t,M as Cl,c as r,d as a,m as h,a as p,x as _,h as n,b as f,F as e,g as i,y as g,q as j,o as w,B as b}from"../../chunks/vendor-4833417e.js";import{T as zl}from"../../chunks/Tip-fffd6df1.js";import{I as Te}from"../../chunks/IconCopyLink-4b81c553.js";import{C as A}from"../../chunks/CodeBlock-6a3d1b46.js";import{C as Al}from"../../chunks/CodeBlockFw-27a176a0.js";import"../../chunks/CopyButton-dacfbfaf.js";function Pl(ts){let d,$,m,k,x;return{c(){d=o("p"),$=t("If you aren\u2019t familiar with fine-tuning a model with Trainer, take a look at the basic tutorial "),m=o("a"),k=t("here"),x=t("!"),this.h()},l(v){d=r(v,"P",{});var y=p(d);$=n(y,"If you aren\u2019t familiar with fine-tuning a model with Trainer, take a look at the basic tutorial "),m=r(y,"A",{href:!0});var P=p(m);k=n(P,"here"),P.forEach(a),x=n(y,"!"),y.forEach(a),this.h()},h(){f(m,"href","training#finetune-with-trainer")},m(v,y){i(v,d,y),e(d,$),e(d,m),e(m,k),e(d,x)},d(v){v&&a(d)}}}function Dl(ts){let d,$,m,k,x;return{c(){d=o("p"),$=t("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),m=o("a"),k=t("here"),x=t("!"),this.h()},l(v){d=r(v,"P",{});var y=p(d);$=n(y,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),m=r(y,"A",{href:!0});var P=p(m);k=n(P,"here"),P.forEach(a),x=n(y,"!"),y.forEach(a),this.h()},h(){f(m,"href","training#finetune-with-keras")},m(v,y){i(v,d,y),e(d,$),e(d,m),e(m,k),e(d,x)},d(v){v&&a(d)}}}function Ml(ts){let 
d,$,m,k,x,v,y,P,xa,Ce,Ds,Ta,Ae,T,Ca,ns,Aa,Pa,Qs,Da,Ma,ls,Fa,Oa,Pe,B,G,Vs,os,Sa,Xs,La,De,Ms,Ba,Me,rs,Fe,Fs,Na,Oe,ps,Se,E,Ia,Zs,Wa,Ua,se,Ga,Ra,ee,Ha,Ya,ae,Ka,Ja,Le,N,R,te,is,Qa,ne,Va,Be,Os,Xa,Ne,cs,Ie,Ss,Za,We,M,I,st,le,et,at,oe,tt,nt,lt,hs,ot,re,rt,pt,it,D,ct,pe,ht,ft,ie,dt,mt,ce,ut,_t,Ue,fs,Ge,C,gt,ds,he,jt,wt,fe,bt,kt,de,vt,yt,Re,ms,He,z,$t,Ls,Et,zt,me,qt,xt,ue,Tt,Ct,_e,At,Pt,Ye,us,ge,Dt,Mt,Ke,_s,Je,W,H,je,gs,Ft,we,Ot,Qe,Y,St,Bs,Lt,Bt,Ve,js,Xe,K,Ze,Ns,Nt,sa,F,ws,It,Is,Wt,Ut,Gt,bs,Rt,Ws,Ht,Yt,Kt,ks,Jt,Us,Qt,Vt,ea,vs,aa,U,J,be,ys,Xt,ke,Zt,ta,Gs,sn,na,Q,la,q,en,ve,an,tn,$s,ye,nn,ln,$e,on,rn,Ee,pn,cn,oa,Es,ra,Rs,hn,pa,zs,ia,V,fn,Hs,dn,mn,ca,qs,ha,X,un,xs,ze,_n,gn,fa,Ts,da,Z,jn,Cs,qe,wn,bn,ma,As,ua;return v=new Te({}),os=new Te({}),rs=new A({props:{code:`from datasets import load_dataset swag = load_dataset("swag", "regular")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>swag = load_dataset(<span class="hljs-string">&quot;swag&quot;</span>, <span class="hljs-string">&quot;regular&quot;</span>)`}}),ps=new A({props:{code:'swag["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>swag[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;ending0&#x27;</span>: <span class="hljs-string">&#x27;passes by walking down the street playing their instruments.&#x27;</span>, <span class="hljs-string">&#x27;ending1&#x27;</span>: <span class="hljs-string">&#x27;has heard approaching them.&#x27;</span>, <span class="hljs-string">&#x27;ending2&#x27;</span>: <span class="hljs-string">&quot;arrives and they&#x27;re outside dancing and asleep.&quot;</span>, <span class="hljs-string">&#x27;ending3&#x27;</span>: <span class="hljs-string">&#x27;turns the lead singer watches the performance.&#x27;</span>, <span 
class="hljs-string">&#x27;fold-ind&#x27;</span>: <span class="hljs-string">&#x27;3416&#x27;</span>, <span class="hljs-string">&#x27;gold-source&#x27;</span>: <span class="hljs-string">&#x27;gold&#x27;</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;sent1&#x27;</span>: <span class="hljs-string">&#x27;Members of the procession walk down the street holding small horn brass instruments.&#x27;</span>, <span class="hljs-string">&#x27;sent2&#x27;</span>: <span class="hljs-string">&#x27;A drum line&#x27;</span>, <span class="hljs-string">&#x27;startphrase&#x27;</span>: <span class="hljs-string">&#x27;Members of the procession walk down the street holding small horn brass instruments. A drum line&#x27;</span>, <span class="hljs-string">&#x27;video-id&#x27;</span>: <span class="hljs-string">&#x27;anetv_jkn6uvmqwh4&#x27;</span>}`}}),is=new Te({}),cs=new A({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)`}}),fs=new A({props:{code:`ending_names = ["ending0", "ending1", "ending2", "ending3"] def preprocess_function(examples): first_sentences = [[context] * 4 for context in examples["sent1"]] question_headers = examples["sent2"] second_sentences = [ [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers) ] first_sentences = sum(first_sentences, []) second_sentences = sum(second_sentences, []) tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True) return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in 
tokenized_examples.items()}`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>ending_names = [<span class="hljs-string">&quot;ending0&quot;</span>, <span class="hljs-string">&quot;ending1&quot;</span>, <span class="hljs-string">&quot;ending2&quot;</span>, <span class="hljs-string">&quot;ending3&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> first_sentences = [[context] * <span class="hljs-number">4</span> <span class="hljs-keyword">for</span> context <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;sent1&quot;</span>]] <span class="hljs-meta">... </span> question_headers = examples[<span class="hljs-string">&quot;sent2&quot;</span>] <span class="hljs-meta">... </span> second_sentences = [ <span class="hljs-meta">... </span> [<span class="hljs-string">f&quot;<span class="hljs-subst">{header}</span> <span class="hljs-subst">{examples[end][i]}</span>&quot;</span> <span class="hljs-keyword">for</span> end <span class="hljs-keyword">in</span> ending_names] <span class="hljs-keyword">for</span> i, header <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(question_headers) <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span> first_sentences = <span class="hljs-built_in">sum</span>(first_sentences, []) <span class="hljs-meta">... </span> second_sentences = <span class="hljs-built_in">sum</span>(second_sentences, []) <span class="hljs-meta">... </span> tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> {k: [v[i : i + <span class="hljs-number">4</span>] <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">0</span>, <span class="hljs-built_in">len</span>(v), <span class="hljs-number">4</span>)] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> tokenized_examples.items()}`}}),ms=new A({props:{code:"tokenized_swag = swag.map(preprocess_function, batched=True)",highlighted:'tokenized_swag = swag.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)'}}),_s=new Al({props:{group1:{id:"pt",code:`from dataclasses import dataclass from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy from typing import Optional, Union import torch @dataclass class DataCollatorForMultipleChoice: """ Data collator that will dynamically pad the inputs for multiple choice received. 
""" tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None def __call__(self, features): label_name = "label" if "label" in features[0].keys() else "labels" labels = [feature.pop(label_name) for feature in features] batch_size = len(features) num_choices = len(features[0]["input_ids"]) flattened_features = [ [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features ] flattened_features = sum(flattened_features, []) batch = self.tokenizer.pad( flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()} batch["labels"] = torch.tensor(labels, dtype=torch.int64) return batch`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> dataclasses <span class="hljs-keyword">import</span> dataclass <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.tokenization_utils_base <span class="hljs-keyword">import</span> PreTrainedTokenizerBase, PaddingStrategy <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">Optional</span>, <span class="hljs-type">Union</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>@dataclass <span class="hljs-meta">... </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DataCollatorForMultipleChoice</span>: <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&quot;&quot; <span class="hljs-meta">... </span> Data collator that will dynamically pad the inputs for multiple choice received. <span class="hljs-meta">... 
</span> &quot;&quot;&quot;</span> <span class="hljs-meta">... </span> tokenizer: PreTrainedTokenizerBase <span class="hljs-meta">... </span> padding: <span class="hljs-type">Union</span>[<span class="hljs-built_in">bool</span>, <span class="hljs-built_in">str</span>, PaddingStrategy] = <span class="hljs-literal">True</span> <span class="hljs-meta">... </span> max_length: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> pad_to_multiple_of: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__call__</span>(<span class="hljs-params">self, features</span>): <span class="hljs-meta">... </span> label_name = <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">if</span> <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">in</span> features[<span class="hljs-number">0</span>].keys() <span class="hljs-keyword">else</span> <span class="hljs-string">&quot;labels&quot;</span> <span class="hljs-meta">... </span> labels = [feature.pop(label_name) <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... </span> batch_size = <span class="hljs-built_in">len</span>(features) <span class="hljs-meta">... </span> num_choices = <span class="hljs-built_in">len</span>(features[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">... </span> flattened_features = [ <span class="hljs-meta">... 
</span> [{k: v[i] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> feature.items()} <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_choices)] <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span> flattened_features = <span class="hljs-built_in">sum</span>(flattened_features, []) <span class="hljs-meta">... </span> batch = self.tokenizer.pad( <span class="hljs-meta">... </span> flattened_features, <span class="hljs-meta">... </span> padding=self.padding, <span class="hljs-meta">... </span> max_length=self.max_length, <span class="hljs-meta">... </span> pad_to_multiple_of=self.pad_to_multiple_of, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> batch = {k: v.view(batch_size, num_choices, -<span class="hljs-number">1</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> batch.items()} <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor(labels, dtype=torch.int64) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch`},group2:{id:"tf",code:`from dataclasses import dataclass from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy from typing import Optional, Union import tensorflow as tf @dataclass class DataCollatorForMultipleChoice: """ Data collator that will dynamically pad the inputs for multiple choice received. 
""" tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None def __call__(self, features): label_name = "label" if "label" in features[0].keys() else "labels" labels = [feature.pop(label_name) for feature in features] batch_size = len(features) num_choices = len(features[0]["input_ids"]) flattened_features = [ [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features ] flattened_features = sum(flattened_features, []) batch = self.tokenizer.pad( flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="tf", ) batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()} batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64) return batch`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> dataclasses <span class="hljs-keyword">import</span> dataclass <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.tokenization_utils_base <span class="hljs-keyword">import</span> PreTrainedTokenizerBase, PaddingStrategy <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">Optional</span>, <span class="hljs-type">Union</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>@dataclass <span class="hljs-meta">... </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DataCollatorForMultipleChoice</span>: <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&quot;&quot; <span class="hljs-meta">... </span> Data collator that will dynamically pad the inputs for multiple choice received. 
<span class="hljs-meta">... </span> &quot;&quot;&quot;</span> <span class="hljs-meta">... </span> tokenizer: PreTrainedTokenizerBase <span class="hljs-meta">... </span> padding: <span class="hljs-type">Union</span>[<span class="hljs-built_in">bool</span>, <span class="hljs-built_in">str</span>, PaddingStrategy] = <span class="hljs-literal">True</span> <span class="hljs-meta">... </span> max_length: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> pad_to_multiple_of: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__call__</span>(<span class="hljs-params">self, features</span>): <span class="hljs-meta">... </span> label_name = <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">if</span> <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">in</span> features[<span class="hljs-number">0</span>].keys() <span class="hljs-keyword">else</span> <span class="hljs-string">&quot;labels&quot;</span> <span class="hljs-meta">... </span> labels = [feature.pop(label_name) <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... </span> batch_size = <span class="hljs-built_in">len</span>(features) <span class="hljs-meta">... </span> num_choices = <span class="hljs-built_in">len</span>(features[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">... </span> flattened_features = [ <span class="hljs-meta">... 
</span> [{k: v[i] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> feature.items()} <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_choices)] <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span> flattened_features = <span class="hljs-built_in">sum</span>(flattened_features, []) <span class="hljs-meta">... </span> batch = self.tokenizer.pad( <span class="hljs-meta">... </span> flattened_features, <span class="hljs-meta">... </span> padding=self.padding, <span class="hljs-meta">... </span> max_length=self.max_length, <span class="hljs-meta">... </span> pad_to_multiple_of=self.pad_to_multiple_of, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> batch = {k: tf.reshape(v, (batch_size, num_choices, -<span class="hljs-number">1</span>)) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> batch.items()} <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;labels&quot;</span>] = tf.convert_to_tensor(labels, dtype=tf.int64) <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> batch`}}}),gs=new Te({}),js=new A({props:{code:`from transformers import AutoModelForMultipleChoice, TrainingArguments, Trainer model = AutoModelForMultipleChoice.from_pretrained("bert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForMultipleChoice, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)`}}),K=new zl({props:{$$slots:{default:[Pl]},$$scope:{ctx:ts}}}),vs=new A({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=5e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=3, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_swag["train"], eval_dataset=tokenized_swag["validation"], tokenizer=tokenizer, data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer), ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">5e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_swag[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_swag[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer), <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),ys=new Te({}),Q=new zl({props:{$$slots:{default:[Dl]},$$scope:{ctx:ts}}}),Es=new A({props:{code:`data_collator = DataCollatorForMultipleChoice(tokenizer=tokenizer) tf_train_set = tokenized_swag["train"].to_tf_dataset( columns=["attention_mask", "input_ids"], label_cols=["labels"], shuffle=True, batch_size=batch_size, collate_fn=data_collator, ) tf_validation_set = tokenized_swag["validation"].to_tf_dataset( columns=["attention_mask", "input_ids"], label_cols=["labels"], shuffle=False, batch_size=batch_size, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForMultipleChoice(tokenizer=tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_swag[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>], <span class="hljs-meta">... </span> label_cols=[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=batch_size, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = tokenized_swag[<span class="hljs-string">&quot;validation&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>], <span class="hljs-meta">... </span> label_cols=[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=batch_size, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)`}}),zs=new A({props:{code:`from transformers import create_optimizer batch_size = 16 num_train_epochs = 2 total_train_steps = (len(tokenized_swag["train"]) // batch_size) * num_train_epochs optimizer, schedule = create_optimizer(init_lr=5e-5, num_warmup_steps=0, num_train_steps=total_train_steps)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_epochs = <span class="hljs-number">2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = (<span class="hljs-built_in">len</span>(tokenized_swag[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_train_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer(init_lr=<span class="hljs-number">5e-5</span>, num_warmup_steps=<span class="hljs-number">0</span>, num_train_steps=total_train_steps)`}}),qs=new A({props:{code:`from transformers import TFAutoModelForMultipleChoice model = TFAutoModelForMultipleChoice.from_pretrained("bert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)`}}),Ts=new A({props:{code:`model.compile( optimizer=optimizer, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>( <span class="hljs-meta">... </span> optimizer=optimizer, <span class="hljs-meta">... </span> loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=<span class="hljs-literal">True</span>), <span class="hljs-meta">... </span>)`}}),As=new A({props:{code:"model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=2)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">2</span>)'}}),{c(){d=o("meta"),$=c(),m=o("h1"),k=o("a"),x=o("span"),u(v.$$.fragment),y=c(),P=o("span"),xa=t("Multiple choice"),Ce=c(),Ds=o("p"),Ta=t("A multiple choice task is similar to question answering, except several candidate answers are provided along with a context. 
The model is trained to select the correct answer from multiple inputs given a context."),Ae=c(),T=o("p"),Ca=t("This guide will show you how to fine-tune "),ns=o("a"),Aa=t("BERT"),Pa=t(" on the "),Qs=o("code"),Da=t("regular"),Ma=t(" configuration of the "),ls=o("a"),Fa=t("SWAG"),Oa=t(" dataset to select the best answer given multiple options and some context."),Pe=c(),B=o("h2"),G=o("a"),Vs=o("span"),u(os.$$.fragment),Sa=c(),Xs=o("span"),La=t("Load SWAG dataset"),De=c(),Ms=o("p"),Ba=t("Load the SWAG dataset from the \u{1F917} Datasets library:"),Me=c(),u(rs.$$.fragment),Fe=c(),Fs=o("p"),Na=t("Then take a look at an example:"),Oe=c(),u(ps.$$.fragment),Se=c(),E=o("p"),Ia=t("The "),Zs=o("code"),Wa=t("sent1"),Ua=t(" and "),se=o("code"),Ga=t("sent2"),Ra=t(" fields show how a sentence begins, and each "),ee=o("code"),Ha=t("ending"),Ya=t(" field shows how a sentence could end. Given the sentence beginning, the model must pick the correct sentence ending as indicated by the "),ae=o("code"),Ka=t("label"),Ja=t(" field."),Le=c(),N=o("h2"),R=o("a"),te=o("span"),u(is.$$.fragment),Qa=c(),ne=o("span"),Va=t("Preprocess"),Be=c(),Os=o("p"),Xa=t("Load the BERT tokenizer to process the start of each sentence and the four possible endings:"),Ne=c(),u(cs.$$.fragment),Ie=c(),Ss=o("p"),Za=t("The preprocessing function needs to do:"),We=c(),M=o("ol"),I=o("li"),st=t("Make four copies of the "),le=o("code"),et=t("sent1"),at=t(" field so you can combine each of them with "),oe=o("code"),tt=t("sent2"),nt=t(" to recreate how a sentence starts."),lt=c(),hs=o("li"),ot=t("Combine "),re=o("code"),rt=t("sent2"),pt=t(" with each of the four possible sentence endings."),it=c(),D=o("li"),ct=t("Flatten these two lists so you can tokenize them, and then unflatten them afterward so each example has a corresponding "),pe=o("code"),ht=t("input_ids"),ft=t(", "),ie=o("code"),dt=t("attention_mask"),mt=t(", and "),ce=o("code"),ut=t("labels"),_t=t(" field."),Ue=c(),u(fs.$$.fragment),Ge=c(),C=o("p"),gt=t("Use 
\u{1F917} Datasets "),ds=o("a"),he=o("code"),jt=t("map"),wt=t(" function to apply the preprocessing function over the entire dataset. You can speed up the "),fe=o("code"),bt=t("map"),kt=t(" function by setting "),de=o("code"),vt=t("batched=True"),yt=t(" to process multiple elements of the dataset at once:"),Re=c(),u(ms.$$.fragment),He=c(),z=o("p"),$t=t("\u{1F917} Transformers doesn\u2019t have a data collator for multiple choice, so you will need to create one. You can adapt the "),Ls=o("a"),Et=t("DataCollatorWithPadding"),zt=t(" to create a batch of examples for multiple choice. It will also "),me=o("em"),qt=t("dynamically pad"),xt=t(" your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),ue=o("code"),Tt=t("tokenizer"),Ct=t(" function by setting "),_e=o("code"),At=t("padding=True"),Pt=t(", dynamic padding is more efficient."),Ye=c(),us=o("p"),ge=o("code"),Dt=t("DataCollatorForMultipleChoice"),Mt=t(" will flatten all the model inputs, apply padding, and then unflatten the results:"),Ke=c(),u(_s.$$.fragment),Je=c(),W=o("h2"),H=o("a"),je=o("span"),u(gs.$$.fragment),Ft=c(),we=o("span"),Ot=t("Fine-tune with Trainer"),Qe=c(),Y=o("p"),St=t("Load BERT with "),Bs=o("a"),Lt=t("AutoModelForMultipleChoice"),Bt=t(":"),Ve=c(),u(js.$$.fragment),Xe=c(),u(K.$$.fragment),Ze=c(),Ns=o("p"),Nt=t("At this point, only three steps remain:"),sa=c(),F=o("ol"),ws=o("li"),It=t("Define your training hyperparameters in "),Is=o("a"),Wt=t("TrainingArguments"),Ut=t("."),Gt=c(),bs=o("li"),Rt=t("Pass the training arguments to "),Ws=o("a"),Ht=t("Trainer"),Yt=t(" along with the model, dataset, tokenizer, and data collator."),Kt=c(),ks=o("li"),Jt=t("Call "),Us=o("a"),Qt=t("train()"),Vt=t(" to fine-tune your model."),ea=c(),u(vs.$$.fragment),aa=c(),U=o("h2"),J=o("a"),be=o("span"),u(ys.$$.fragment),Xt=c(),ke=o("span"),Zt=t("Fine-tune with TensorFlow"),ta=c(),Gs=o("p"),sn=t("To fine-tune a model in TensorFlow 
is just as easy, with only a few differences."),na=c(),u(Q.$$.fragment),la=c(),q=o("p"),en=t("Convert your datasets to the "),ve=o("code"),an=t("tf.data.Dataset"),tn=t(" format with "),$s=o("a"),ye=o("code"),nn=t("to_tf_dataset"),ln=t(". Specify inputs in "),$e=o("code"),on=t("columns"),rn=t(", targets in "),Ee=o("code"),pn=t("label_cols"),cn=t(", whether to shuffle the dataset order, batch size, and the data collator:"),oa=c(),u(Es.$$.fragment),ra=c(),Rs=o("p"),hn=t("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),pa=c(),u(zs.$$.fragment),ia=c(),V=o("p"),fn=t("Load BERT with "),Hs=o("a"),dn=t("TFAutoModelForMultipleChoice"),mn=t(":"),ca=c(),u(qs.$$.fragment),ha=c(),X=o("p"),un=t("Configure the model for training with "),xs=o("a"),ze=o("code"),_n=t("compile"),gn=t(":"),fa=c(),u(Ts.$$.fragment),da=c(),Z=o("p"),jn=t("Call "),Cs=o("a"),qe=o("code"),wn=t("fit"),bn=t(" to fine-tune the model:"),ma=c(),u(As.$$.fragment),this.h()},l(s){const l=Cl('[data-svelte="svelte-1phssyn"]',document.head);d=r(l,"META",{name:!0,content:!0}),l.forEach(a),$=h(s),m=r(s,"H1",{class:!0});var Ps=p(m);k=r(Ps,"A",{id:!0,class:!0,href:!0});var xe=p(k);x=r(xe,"SPAN",{});var vn=p(x);_(v.$$.fragment,vn),vn.forEach(a),xe.forEach(a),y=h(Ps),P=r(Ps,"SPAN",{});var yn=p(P);xa=n(yn,"Multiple choice"),yn.forEach(a),Ps.forEach(a),Ce=h(s),Ds=r(s,"P",{});var $n=p(Ds);Ta=n($n,"A multiple choice task is similar to question answering, except several candidate answers are provided along with a context. 
The model is trained to select the correct answer from multiple inputs given a context."),$n.forEach(a),Ae=h(s),T=r(s,"P",{});var ss=p(T);Ca=n(ss,"This guide will show you how to fine-tune "),ns=r(ss,"A",{href:!0,rel:!0});var En=p(ns);Aa=n(En,"BERT"),En.forEach(a),Pa=n(ss," on the "),Qs=r(ss,"CODE",{});var zn=p(Qs);Da=n(zn,"regular"),zn.forEach(a),Ma=n(ss," configuration of the "),ls=r(ss,"A",{href:!0,rel:!0});var qn=p(ls);Fa=n(qn,"SWAG"),qn.forEach(a),Oa=n(ss," dataset to select the best answer given multiple options and some context."),ss.forEach(a),Pe=h(s),B=r(s,"H2",{class:!0});var _a=p(B);G=r(_a,"A",{id:!0,class:!0,href:!0});var xn=p(G);Vs=r(xn,"SPAN",{});var Tn=p(Vs);_(os.$$.fragment,Tn),Tn.forEach(a),xn.forEach(a),Sa=h(_a),Xs=r(_a,"SPAN",{});var Cn=p(Xs);La=n(Cn,"Load SWAG dataset"),Cn.forEach(a),_a.forEach(a),De=h(s),Ms=r(s,"P",{});var An=p(Ms);Ba=n(An,"Load the SWAG dataset from the \u{1F917} Datasets library:"),An.forEach(a),Me=h(s),_(rs.$$.fragment,s),Fe=h(s),Fs=r(s,"P",{});var Pn=p(Fs);Na=n(Pn,"Then take a look at an example:"),Pn.forEach(a),Oe=h(s),_(ps.$$.fragment,s),Se=h(s),E=r(s,"P",{});var O=p(E);Ia=n(O,"The "),Zs=r(O,"CODE",{});var Dn=p(Zs);Wa=n(Dn,"sent1"),Dn.forEach(a),Ua=n(O," and "),se=r(O,"CODE",{});var Mn=p(se);Ga=n(Mn,"sent2"),Mn.forEach(a),Ra=n(O," fields show how a sentence begins, and each "),ee=r(O,"CODE",{});var Fn=p(ee);Ha=n(Fn,"ending"),Fn.forEach(a),Ya=n(O," field shows how a sentence could end. 
Given the sentence beginning, the model must pick the correct sentence ending as indicated by the "),ae=r(O,"CODE",{});var On=p(ae);Ka=n(On,"label"),On.forEach(a),Ja=n(O," field."),O.forEach(a),Le=h(s),N=r(s,"H2",{class:!0});var ga=p(N);R=r(ga,"A",{id:!0,class:!0,href:!0});var Sn=p(R);te=r(Sn,"SPAN",{});var Ln=p(te);_(is.$$.fragment,Ln),Ln.forEach(a),Sn.forEach(a),Qa=h(ga),ne=r(ga,"SPAN",{});var Bn=p(ne);Va=n(Bn,"Preprocess"),Bn.forEach(a),ga.forEach(a),Be=h(s),Os=r(s,"P",{});var Nn=p(Os);Xa=n(Nn,"Load the BERT tokenizer to process the start of each sentence and the four possible endings:"),Nn.forEach(a),Ne=h(s),_(cs.$$.fragment,s),Ie=h(s),Ss=r(s,"P",{});var In=p(Ss);Za=n(In,"The preprocessing function needs to do:"),In.forEach(a),We=h(s),M=r(s,"OL",{});var Ys=p(M);I=r(Ys,"LI",{});var Ks=p(I);st=n(Ks,"Make four copies of the "),le=r(Ks,"CODE",{});var Wn=p(le);et=n(Wn,"sent1"),Wn.forEach(a),at=n(Ks," field so you can combine each of them with "),oe=r(Ks,"CODE",{});var Un=p(oe);tt=n(Un,"sent2"),Un.forEach(a),nt=n(Ks," to recreate how a sentence starts."),Ks.forEach(a),lt=h(Ys),hs=r(Ys,"LI",{});var ja=p(hs);ot=n(ja,"Combine "),re=r(ja,"CODE",{});var Gn=p(re);rt=n(Gn,"sent2"),Gn.forEach(a),pt=n(ja," with each of the four possible sentence endings."),ja.forEach(a),it=h(Ys),D=r(Ys,"LI",{});var es=p(D);ct=n(es,"Flatten these two lists so you can tokenize them, and then unflatten them afterward so each example has a corresponding "),pe=r(es,"CODE",{});var Rn=p(pe);ht=n(Rn,"input_ids"),Rn.forEach(a),ft=n(es,", "),ie=r(es,"CODE",{});var Hn=p(ie);dt=n(Hn,"attention_mask"),Hn.forEach(a),mt=n(es,", and "),ce=r(es,"CODE",{});var Yn=p(ce);ut=n(Yn,"labels"),Yn.forEach(a),_t=n(es," field."),es.forEach(a),Ys.forEach(a),Ue=h(s),_(fs.$$.fragment,s),Ge=h(s),C=r(s,"P",{});var as=p(C);gt=n(as,"Use \u{1F917} Datasets "),ds=r(as,"A",{href:!0,rel:!0});var Kn=p(ds);he=r(Kn,"CODE",{});var Jn=p(he);jt=n(Jn,"map"),Jn.forEach(a),Kn.forEach(a),wt=n(as," function to apply the preprocessing 
function over the entire dataset. You can speed up the "),fe=r(as,"CODE",{});var Qn=p(fe);bt=n(Qn,"map"),Qn.forEach(a),kt=n(as," function by setting "),de=r(as,"CODE",{});var Vn=p(de);vt=n(Vn,"batched=True"),Vn.forEach(a),yt=n(as," to process multiple elements of the dataset at once:"),as.forEach(a),Re=h(s),_(ms.$$.fragment,s),He=h(s),z=r(s,"P",{});var S=p(z);$t=n(S,"\u{1F917} Transformers doesn\u2019t have a data collator for multiple choice, so you will need to create one. You can adapt the "),Ls=r(S,"A",{href:!0});var Xn=p(Ls);Et=n(Xn,"DataCollatorWithPadding"),Xn.forEach(a),zt=n(S," to create a batch of examples for multiple choice. It will also "),me=r(S,"EM",{});var Zn=p(me);qt=n(Zn,"dynamically pad"),Zn.forEach(a),xt=n(S," your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),ue=r(S,"CODE",{});var sl=p(ue);Tt=n(sl,"tokenizer"),sl.forEach(a),Ct=n(S," function by setting "),_e=r(S,"CODE",{});var el=p(_e);At=n(el,"padding=True"),el.forEach(a),Pt=n(S,", dynamic padding is more efficient."),S.forEach(a),Ye=h(s),us=r(s,"P",{});var kn=p(us);ge=r(kn,"CODE",{});var al=p(ge);Dt=n(al,"DataCollatorForMultipleChoice"),al.forEach(a),Mt=n(kn," will flatten all the model inputs, apply padding, and then unflatten the results:"),kn.forEach(a),Ke=h(s),_(_s.$$.fragment,s),Je=h(s),W=r(s,"H2",{class:!0});var wa=p(W);H=r(wa,"A",{id:!0,class:!0,href:!0});var tl=p(H);je=r(tl,"SPAN",{});var nl=p(je);_(gs.$$.fragment,nl),nl.forEach(a),tl.forEach(a),Ft=h(wa),we=r(wa,"SPAN",{});var ll=p(we);Ot=n(ll,"Fine-tune with Trainer"),ll.forEach(a),wa.forEach(a),Qe=h(s),Y=r(s,"P",{});var ba=p(Y);St=n(ba,"Load BERT with "),Bs=r(ba,"A",{href:!0});var ol=p(Bs);Lt=n(ol,"AutoModelForMultipleChoice"),ol.forEach(a),Bt=n(ba,":"),ba.forEach(a),Ve=h(s),_(js.$$.fragment,s),Xe=h(s),_(K.$$.fragment,s),Ze=h(s),Ns=r(s,"P",{});var rl=p(Ns);Nt=n(rl,"At this point, only three steps 
remain:"),rl.forEach(a),sa=h(s),F=r(s,"OL",{});var Js=p(F);ws=r(Js,"LI",{});var ka=p(ws);It=n(ka,"Define your training hyperparameters in "),Is=r(ka,"A",{href:!0});var pl=p(Is);Wt=n(pl,"TrainingArguments"),pl.forEach(a),Ut=n(ka,"."),ka.forEach(a),Gt=h(Js),bs=r(Js,"LI",{});var va=p(bs);Rt=n(va,"Pass the training arguments to "),Ws=r(va,"A",{href:!0});var il=p(Ws);Ht=n(il,"Trainer"),il.forEach(a),Yt=n(va," along with the model, dataset, tokenizer, and data collator."),va.forEach(a),Kt=h(Js),ks=r(Js,"LI",{});var ya=p(ks);Jt=n(ya,"Call "),Us=r(ya,"A",{href:!0});var cl=p(Us);Qt=n(cl,"train()"),cl.forEach(a),Vt=n(ya," to fine-tune your model."),ya.forEach(a),Js.forEach(a),ea=h(s),_(vs.$$.fragment,s),aa=h(s),U=r(s,"H2",{class:!0});var $a=p(U);J=r($a,"A",{id:!0,class:!0,href:!0});var hl=p(J);be=r(hl,"SPAN",{});var fl=p(be);_(ys.$$.fragment,fl),fl.forEach(a),hl.forEach(a),Xt=h($a),ke=r($a,"SPAN",{});var dl=p(ke);Zt=n(dl,"Fine-tune with TensorFlow"),dl.forEach(a),$a.forEach(a),ta=h(s),Gs=r(s,"P",{});var ml=p(Gs);sn=n(ml,"To fine-tune a model in TensorFlow is just as easy, with only a few differences."),ml.forEach(a),na=h(s),_(Q.$$.fragment,s),la=h(s),q=r(s,"P",{});var L=p(q);en=n(L,"Convert your datasets to the "),ve=r(L,"CODE",{});var ul=p(ve);an=n(ul,"tf.data.Dataset"),ul.forEach(a),tn=n(L," format with "),$s=r(L,"A",{href:!0,rel:!0});var _l=p($s);ye=r(_l,"CODE",{});var gl=p(ye);nn=n(gl,"to_tf_dataset"),gl.forEach(a),_l.forEach(a),ln=n(L,". 
Specify inputs in "),$e=r(L,"CODE",{});var jl=p($e);on=n(jl,"columns"),jl.forEach(a),rn=n(L,", targets in "),Ee=r(L,"CODE",{});var wl=p(Ee);pn=n(wl,"label_cols"),wl.forEach(a),cn=n(L,", whether to shuffle the dataset order, batch size, and the data collator:"),L.forEach(a),oa=h(s),_(Es.$$.fragment,s),ra=h(s),Rs=r(s,"P",{});var bl=p(Rs);hn=n(bl,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),bl.forEach(a),pa=h(s),_(zs.$$.fragment,s),ia=h(s),V=r(s,"P",{});var Ea=p(V);fn=n(Ea,"Load BERT with "),Hs=r(Ea,"A",{href:!0});var kl=p(Hs);dn=n(kl,"TFAutoModelForMultipleChoice"),kl.forEach(a),mn=n(Ea,":"),Ea.forEach(a),ca=h(s),_(qs.$$.fragment,s),ha=h(s),X=r(s,"P",{});var za=p(X);un=n(za,"Configure the model for training with "),xs=r(za,"A",{href:!0,rel:!0});var vl=p(xs);ze=r(vl,"CODE",{});var yl=p(ze);_n=n(yl,"compile"),yl.forEach(a),vl.forEach(a),gn=n(za,":"),za.forEach(a),fa=h(s),_(Ts.$$.fragment,s),da=h(s),Z=r(s,"P",{});var qa=p(Z);jn=n(qa,"Call "),Cs=r(qa,"A",{href:!0,rel:!0});var $l=p(Cs);qe=r($l,"CODE",{});var El=p(qe);wn=n(El,"fit"),El.forEach(a),$l.forEach(a),bn=n(qa," to fine-tune the model:"),qa.forEach(a),ma=h(s),_(As.$$.fragment,s),this.h()},h(){f(d,"name","hf:doc:metadata"),f(d,"content",JSON.stringify(Fl)),f(k,"id","multiple-choice"),f(k,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(k,"href","#multiple-choice"),f(m,"class","relative group"),f(ns,"href","https://huggingface.co/bert-base-uncased"),f(ns,"rel","nofollow"),f(ls,"href","https://huggingface.co/datasets/swag"),f(ls,"rel","nofollow"),f(G,"id","load-swag-dataset"),f(G,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(G,"href","#load-swag-dataset"),f(B,"class","relative 
group"),f(R,"id","preprocess"),f(R,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(R,"href","#preprocess"),f(N,"class","relative group"),f(ds,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map"),f(ds,"rel","nofollow"),f(Ls,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorWithPadding"),f(H,"id","finetune-with-trainer"),f(H,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(H,"href","#finetune-with-trainer"),f(W,"class","relative group"),f(Bs,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForMultipleChoice"),f(Is,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),f(Ws,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),f(Us,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train"),f(J,"id","finetune-with-tensorflow"),f(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(J,"href","#finetune-with-tensorflow"),f(U,"class","relative 
group"),f($s,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),f($s,"rel","nofollow"),f(Hs,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForMultipleChoice"),f(xs,"href","https://keras.io/api/models/model_training_apis/#compile-method"),f(xs,"rel","nofollow"),f(Cs,"href","https://keras.io/api/models/model_training_apis/#fit-method"),f(Cs,"rel","nofollow")},m(s,l){e(document.head,d),i(s,$,l),i(s,m,l),e(m,k),e(k,x),g(v,x,null),e(m,y),e(m,P),e(P,xa),i(s,Ce,l),i(s,Ds,l),e(Ds,Ta),i(s,Ae,l),i(s,T,l),e(T,Ca),e(T,ns),e(ns,Aa),e(T,Pa),e(T,Qs),e(Qs,Da),e(T,Ma),e(T,ls),e(ls,Fa),e(T,Oa),i(s,Pe,l),i(s,B,l),e(B,G),e(G,Vs),g(os,Vs,null),e(B,Sa),e(B,Xs),e(Xs,La),i(s,De,l),i(s,Ms,l),e(Ms,Ba),i(s,Me,l),g(rs,s,l),i(s,Fe,l),i(s,Fs,l),e(Fs,Na),i(s,Oe,l),g(ps,s,l),i(s,Se,l),i(s,E,l),e(E,Ia),e(E,Zs),e(Zs,Wa),e(E,Ua),e(E,se),e(se,Ga),e(E,Ra),e(E,ee),e(ee,Ha),e(E,Ya),e(E,ae),e(ae,Ka),e(E,Ja),i(s,Le,l),i(s,N,l),e(N,R),e(R,te),g(is,te,null),e(N,Qa),e(N,ne),e(ne,Va),i(s,Be,l),i(s,Os,l),e(Os,Xa),i(s,Ne,l),g(cs,s,l),i(s,Ie,l),i(s,Ss,l),e(Ss,Za),i(s,We,l),i(s,M,l),e(M,I),e(I,st),e(I,le),e(le,et),e(I,at),e(I,oe),e(oe,tt),e(I,nt),e(M,lt),e(M,hs),e(hs,ot),e(hs,re),e(re,rt),e(hs,pt),e(M,it),e(M,D),e(D,ct),e(D,pe),e(pe,ht),e(D,ft),e(D,ie),e(ie,dt),e(D,mt),e(D,ce),e(ce,ut),e(D,_t),i(s,Ue,l),g(fs,s,l),i(s,Ge,l),i(s,C,l),e(C,gt),e(C,ds),e(ds,he),e(he,jt),e(C,wt),e(C,fe),e(fe,bt),e(C,kt),e(C,de),e(de,vt),e(C,yt),i(s,Re,l),g(ms,s,l),i(s,He,l),i(s,z,l),e(z,$t),e(z,Ls),e(Ls,Et),e(z,zt),e(z,me),e(me,qt),e(z,xt),e(z,ue),e(ue,Tt),e(z,Ct),e(z,_e),e(_e,At),e(z,Pt),i(s,Ye,l),i(s,us,l),e(us,ge),e(ge,Dt),e(us,Mt),i(s,Ke,l),g(_s,s,l),i(s,Je,l),i(s,W,l),e(W,H),e(H,je),g(gs,je,null),e(W,Ft),e(W,we),e(we,Ot),i(s,Qe,l),i(s,Y,l),e(Y,St),e(Y,Bs),e(Bs,Lt),e(Y,Bt),i(s,Ve,l),g(js,s,l),i(s,Xe,l),g(K,s,l),i(s,Ze,l),i(s,Ns,l),e(Ns,Nt),i(s,sa,l),i(s,F,l),e(F,ws),e(ws,It),e(ws,Is),e(Is,Wt),e(ws,Ut),e(F,Gt),e(F,bs),e(bs,Rt),e(bs,Ws),e(Ws,
Ht),e(bs,Yt),e(F,Kt),e(F,ks),e(ks,Jt),e(ks,Us),e(Us,Qt),e(ks,Vt),i(s,ea,l),g(vs,s,l),i(s,aa,l),i(s,U,l),e(U,J),e(J,be),g(ys,be,null),e(U,Xt),e(U,ke),e(ke,Zt),i(s,ta,l),i(s,Gs,l),e(Gs,sn),i(s,na,l),g(Q,s,l),i(s,la,l),i(s,q,l),e(q,en),e(q,ve),e(ve,an),e(q,tn),e(q,$s),e($s,ye),e(ye,nn),e(q,ln),e(q,$e),e($e,on),e(q,rn),e(q,Ee),e(Ee,pn),e(q,cn),i(s,oa,l),g(Es,s,l),i(s,ra,l),i(s,Rs,l),e(Rs,hn),i(s,pa,l),g(zs,s,l),i(s,ia,l),i(s,V,l),e(V,fn),e(V,Hs),e(Hs,dn),e(V,mn),i(s,ca,l),g(qs,s,l),i(s,ha,l),i(s,X,l),e(X,un),e(X,xs),e(xs,ze),e(ze,_n),e(X,gn),i(s,fa,l),g(Ts,s,l),i(s,da,l),i(s,Z,l),e(Z,jn),e(Z,Cs),e(Cs,qe),e(qe,wn),e(Z,bn),i(s,ma,l),g(As,s,l),ua=!0},p(s,[l]){const Ps={};l&2&&(Ps.$$scope={dirty:l,ctx:s}),K.$set(Ps);const xe={};l&2&&(xe.$$scope={dirty:l,ctx:s}),Q.$set(xe)},i(s){ua||(j(v.$$.fragment,s),j(os.$$.fragment,s),j(rs.$$.fragment,s),j(ps.$$.fragment,s),j(is.$$.fragment,s),j(cs.$$.fragment,s),j(fs.$$.fragment,s),j(ms.$$.fragment,s),j(_s.$$.fragment,s),j(gs.$$.fragment,s),j(js.$$.fragment,s),j(K.$$.fragment,s),j(vs.$$.fragment,s),j(ys.$$.fragment,s),j(Q.$$.fragment,s),j(Es.$$.fragment,s),j(zs.$$.fragment,s),j(qs.$$.fragment,s),j(Ts.$$.fragment,s),j(As.$$.fragment,s),ua=!0)},o(s){w(v.$$.fragment,s),w(os.$$.fragment,s),w(rs.$$.fragment,s),w(ps.$$.fragment,s),w(is.$$.fragment,s),w(cs.$$.fragment,s),w(fs.$$.fragment,s),w(ms.$$.fragment,s),w(_s.$$.fragment,s),w(gs.$$.fragment,s),w(js.$$.fragment,s),w(K.$$.fragment,s),w(vs.$$.fragment,s),w(ys.$$.fragment,s),w(Q.$$.fragment,s),w(Es.$$.fragment,s),w(zs.$$.fragment,s),w(qs.$$.fragment,s),w(Ts.$$.fragment,s),w(As.$$.fragment,s),ua=!1},d(s){a(d),s&&a($),s&&a(m),b(v),s&&a(Ce),s&&a(Ds),s&&a(Ae),s&&a(T),s&&a(Pe),s&&a(B),b(os),s&&a(De),s&&a(Ms),s&&a(Me),b(rs,s),s&&a(Fe),s&&a(Fs),s&&a(Oe),b(ps,s),s&&a(Se),s&&a(E),s&&a(Le),s&&a(N),b(is),s&&a(Be),s&&a(Os),s&&a(Ne),b(cs,s),s&&a(Ie),s&&a(Ss),s&&a(We),s&&a(M),s&&a(Ue),b(fs,s),s&&a(Ge),s&&a(C),s&&a(Re),b(ms,s),s&&a(He),s&&a(z),s&&a(Ye),s&&a(us),s&&a(Ke),b(_s,s),s&&a(Je),s&&a(W),b(gs),s&&a(
Qe),s&&a(Y),s&&a(Ve),b(js,s),s&&a(Xe),b(K,s),s&&a(Ze),s&&a(Ns),s&&a(sa),s&&a(F),s&&a(ea),b(vs,s),s&&a(aa),s&&a(U),b(ys),s&&a(ta),s&&a(Gs),s&&a(na),b(Q,s),s&&a(la),s&&a(q),s&&a(oa),b(Es,s),s&&a(ra),s&&a(Rs),s&&a(pa),b(zs,s),s&&a(ia),s&&a(V),s&&a(ca),b(qs,s),s&&a(ha),s&&a(X),s&&a(fa),b(Ts,s),s&&a(da),s&&a(Z),s&&a(ma),b(As,s)}}}const Fl={local:"multiple-choice",sections:[{local:"load-swag-dataset",title:"Load SWAG dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-trainer",title:"Fine-tune with Trainer"},{local:"finetune-with-tensorflow",title:"Fine-tune with TensorFlow"}],title:"Multiple choice"};function Ol(ts,d,$){let{fw:m}=d;return ts.$$set=k=>{"fw"in k&&$(0,m=k.fw)},[m]}class Ul extends ql{constructor(d){super();xl(this,d,Ol,Ml,Tl,{fw:0})}}export{Ul as default,Fl as metadata};
410
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/tasks/image_classification.mdx-e10d3b71.js
import{S as zs,i as Ss,s as Ls,e as o,k as m,w as $,t as l,M as Ns,c as n,d as a,m as f,a as i,x as v,h as r,b as c,F as t,g as p,y as b,q as w,o as j,B as y}from"../../chunks/vendor-4833417e.js";import{T as Xt}from"../../chunks/Tip-fffd6df1.js";import{Y as Os}from"../../chunks/Youtube-27813aed.js";import{I as Va}from"../../chunks/IconCopyLink-4b81c553.js";import{C as P}from"../../chunks/CodeBlock-6a3d1b46.js";import"../../chunks/CopyButton-dacfbfaf.js";function Rs(z){let h,k,d,g,E;return{c(){h=o("p"),k=l("See the image classification "),d=o("a"),g=l("task page"),E=l(" for more information about its associated models, datasets, and metrics."),this.h()},l(u){h=n(u,"P",{});var _=i(h);k=r(_,"See the image classification "),d=n(_,"A",{href:!0,rel:!0});var x=i(d);g=r(x,"task page"),x.forEach(a),E=r(_," for more information about its associated models, datasets, and metrics."),_.forEach(a),this.h()},h(){c(d,"href","https://huggingface.co/tasks/audio-classification"),c(d,"rel","nofollow")},m(u,_){p(u,h,_),t(h,k),t(h,d),t(d,g),t(h,E)},d(u){u&&a(h)}}}function Ms(z){let h,k,d,g,E,u,_,x;return{c(){h=o("p"),k=l("If you aren\u2019t familiar with fine-tuning a model with the "),d=o("a"),g=l("Trainer"),E=l(", take a look at the basic tutorial "),u=o("a"),_=l("here"),x=l("!"),this.h()},l(S){h=n(S,"P",{});var A=i(h);k=r(A,"If you aren\u2019t familiar with fine-tuning a model with the "),d=n(A,"A",{href:!0});var D=i(d);g=r(D,"Trainer"),D.forEach(a),E=r(A,", take a look at the basic tutorial "),u=n(A,"A",{href:!0});var X=i(u);_=r(X,"here"),X.forEach(a),x=r(A,"!"),A.forEach(a),this.h()},h(){c(d,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(u,"href","training#finetune-with-trainer")},m(S,A){p(S,h,A),t(h,k),t(h,d),t(d,g),t(h,E),t(h,u),t(u,_),t(h,x)},d(S){S&&a(h)}}}function Us(z){let h,k,d,g,E;return{c(){h=o("p"),k=l("For a more in-depth example of how to fine-tune a model for image classification, take a look at the corresponding 
"),d=o("a"),g=l("PyTorch notebook"),E=l("."),this.h()},l(u){h=n(u,"P",{});var _=i(h);k=r(_,"For a more in-depth example of how to fine-tune a model for image classification, take a look at the corresponding "),d=n(_,"A",{href:!0,rel:!0});var x=i(d);g=r(x,"PyTorch notebook"),x.forEach(a),E=r(_,"."),_.forEach(a),this.h()},h(){c(d,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/image_classification.ipynb"),c(d,"rel","nofollow")},m(u,_){p(u,h,_),t(h,k),t(h,d),t(d,g),t(h,E)},d(u){u&&a(h)}}}function Bs(z){let h,k,d,g,E,u,_,x,S,A,D,X,we,Ga,ea,q,Ja,Z,Ya,Wa,ee,Ka,Qa,aa,R,ta,L,M,Se,ae,Xa,Le,Za,sa,je,et,la,te,ra,ye,at,oa,se,na,ke,tt,ia,le,pa,F,st,Ne,lt,rt,Oe,ot,nt,ma,re,fa,Ee,it,ha,oe,ca,U,pt,Re,mt,ft,da,N,B,Me,ne,ht,Ue,ct,ua,xe,dt,ga,ie,_a,H,ut,pe,Be,gt,_t,$a,me,va,V,$t,He,vt,bt,ba,fe,wa,G,wt,he,Ve,jt,yt,ja,ce,ya,J,kt,Te,Et,xt,ka,de,Ea,O,Y,Ge,ue,Tt,Je,At,xa,W,Ct,Ae,Pt,Dt,Ta,ge,Aa,K,Ca,Ce,qt,Pa,I,T,Ft,Pe,It,zt,Ye,St,Lt,We,Nt,Ot,Ke,Rt,Mt,Qe,Ut,Bt,Ht,_e,Vt,De,Gt,Jt,Yt,$e,Wt,qe,Kt,Qt,Da,ve,qa,Q,Fa;return u=new Va({}),D=new Os({props:{id:"tjAIM7BOYhw"}}),R=new Xt({props:{$$slots:{default:[Rs]},$$scope:{ctx:z}}}),ae=new Va({}),te=new P({props:{code:`from datasets import load_dataset food = load_dataset("food101", split="train[:5000]")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>food = load_dataset(<span class="hljs-string">&quot;food101&quot;</span>, split=<span class="hljs-string">&quot;train[:5000]&quot;</span>)`}}),se=new P({props:{code:"food = food.train_test_split(test_size=0.2)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>food = food.train_test_split(test_size=<span class="hljs-number">0.2</span>)'}}),le=new P({props:{code:'food["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>food[<span class="hljs-string">&quot;train&quot;</span>][<span 
class="hljs-number">0</span>] {<span class="hljs-string">&#x27;image&#x27;</span>: &lt;PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at <span class="hljs-number">0x7F52AFC8AC50</span>&gt;, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">79</span>}`}}),re=new P({props:{code:`labels = food["train"].features["label"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = str(i) id2label[str(i)] = label`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>labels = food[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">&quot;label&quot;</span>].names <span class="hljs-meta">&gt;&gt;&gt; </span>label2id, id2label = <span class="hljs-built_in">dict</span>(), <span class="hljs-built_in">dict</span>() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(labels): <span class="hljs-meta">... </span> label2id[label] = <span class="hljs-built_in">str</span>(i) <span class="hljs-meta">... 
</span> id2label[<span class="hljs-built_in">str</span>(i)] = label`}}),oe=new P({props:{code:"id2label[str(79)]",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>id2label[<span class="hljs-built_in">str</span>(<span class="hljs-number">79</span>)] <span class="hljs-string">&#x27;prime_rib&#x27;</span>`}}),ne=new Va({}),ie=new P({props:{code:`from transformers import AutoFeatureExtractor feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>)`}}),me=new P({props:{code:`from torchvision.transforms import RandomResizedCrop, Compose, Normalize, ToTensor normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) _transforms = Compose([RandomResizedCrop(feature_extractor.size), ToTensor(), normalize])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torchvision.transforms <span class="hljs-keyword">import</span> RandomResizedCrop, Compose, Normalize, ToTensor <span class="hljs-meta">&gt;&gt;&gt; </span>normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) <span class="hljs-meta">&gt;&gt;&gt; </span>_transforms = Compose([RandomResizedCrop(feature_extractor.size), ToTensor(), normalize])`}}),fe=new P({props:{code:`def transforms(examples): examples["pixel_values"] = [_transforms(img.convert("RGB")) for img in examples["image"]] del examples["image"] return examples`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">transforms</span>(<span class="hljs-params">examples</span>): <span 
class="hljs-meta">... </span> examples[<span class="hljs-string">&quot;pixel_values&quot;</span>] = [_transforms(img.convert(<span class="hljs-string">&quot;RGB&quot;</span>)) <span class="hljs-keyword">for</span> img <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;image&quot;</span>]] <span class="hljs-meta">... </span> <span class="hljs-keyword">del</span> examples[<span class="hljs-string">&quot;image&quot;</span>] <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> examples`}}),ce=new P({props:{code:"food = food.with_transform(transforms)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>food = food.with_transform(transforms)'}}),de=new P({props:{code:`from transformers import DefaultDataCollator data_collator = DefaultDataCollator()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator()`}}),ue=new Va({}),ge=new P({props:{code:`from transformers import AutoModelForImageClassification, TrainingArguments, Trainer model = AutoModelForImageClassification.from_pretrained( "google/vit-base-patch16-224-in21k", num_labels=len(labels), id2label=id2label, label2id=label2id, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForImageClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-meta">... </span> num_labels=<span class="hljs-built_in">len</span>(labels), <span class="hljs-meta">... </span> id2label=id2label, <span class="hljs-meta">... 
</span> label2id=label2id, <span class="hljs-meta">... </span>)`}}),K=new Xt({props:{$$slots:{default:[Ms]},$$scope:{ctx:z}}}),ve=new P({props:{code:`training_args = TrainingArguments( output_dir="./results", per_device_train_batch_size=16, evaluation_strategy="steps", num_train_epochs=4, fp16=True, save_steps=100, eval_steps=100, logging_steps=10, learning_rate=2e-4, save_total_limit=2, remove_unused_columns=False, ) trainer = Trainer( model=model, args=training_args, data_collator=data_collator, train_dataset=food["train"], eval_dataset=food["test"], tokenizer=feature_extractor, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;steps&quot;</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">4</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> save_steps=<span class="hljs-number">100</span>, <span class="hljs-meta">... </span> eval_steps=<span class="hljs-number">100</span>, <span class="hljs-meta">... </span> logging_steps=<span class="hljs-number">10</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-4</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">2</span>, <span class="hljs-meta">... </span> remove_unused_columns=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... 
</span> data_collator=data_collator, <span class="hljs-meta">... </span> train_dataset=food[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=food[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=feature_extractor, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),Q=new Xt({props:{$$slots:{default:[Us]},$$scope:{ctx:z}}}),{c(){h=o("meta"),k=m(),d=o("h1"),g=o("a"),E=o("span"),$(u.$$.fragment),_=m(),x=o("span"),S=l("Image classification"),A=m(),$(D.$$.fragment),X=m(),we=o("p"),Ga=l("Image classification assigns a label or class to an image. Unlike text or audio classification, the inputs are the pixel values that represent an image. There are many uses for image classification, like detecting damage after a disaster, monitoring crop health, or helping screen medical images for signs of disease."),ea=m(),q=o("p"),Ja=l("This guide will show you how to fine-tune "),Z=o("a"),Ya=l("ViT"),Wa=l(" on the "),ee=o("a"),Ka=l("Food-101"),Qa=l(" dataset to classify a food item in an image."),aa=m(),$(R.$$.fragment),ta=m(),L=o("h2"),M=o("a"),Se=o("span"),$(ae.$$.fragment),Xa=m(),Le=o("span"),Za=l("Load Food-101 dataset"),sa=m(),je=o("p"),et=l("Load only the first 5000 images of the Food-101 dataset from the \u{1F917} Datasets library since it is pretty large:"),la=m(),$(te.$$.fragment),ra=m(),ye=o("p"),at=l("Split this dataset into a train and test set:"),oa=m(),$(se.$$.fragment),na=m(),ke=o("p"),tt=l("Then take a look at an example:"),ia=m(),$(le.$$.fragment),pa=m(),F=o("p"),st=l("The "),Ne=o("code"),lt=l("image"),rt=l(" field contains a PIL image, and each "),Oe=o("code"),ot=l("label"),nt=l(" is an integer that represents a class. Create a dictionary that maps a label name to an integer and vice versa. 
The mapping will help the model recover the label name from the label number:"),ma=m(),$(re.$$.fragment),fa=m(),Ee=o("p"),it=l("Now you can convert the label number to a label name for more information:"),ha=m(),$(oe.$$.fragment),ca=m(),U=o("p"),pt=l("Each food class - or label - corresponds to a number; "),Re=o("code"),mt=l("79"),ft=l(" indicates a prime rib in the example above."),da=m(),N=o("h2"),B=o("a"),Me=o("span"),$(ne.$$.fragment),ht=m(),Ue=o("span"),ct=l("Preprocess"),ua=m(),xe=o("p"),dt=l("Load the ViT feature extractor to process the image into a tensor:"),ga=m(),$(ie.$$.fragment),_a=m(),H=o("p"),ut=l("Apply several image transformations to the dataset to make the model more robust against overfitting. Here you\u2019ll use torchvision\u2019s "),pe=o("a"),Be=o("code"),gt=l("transforms"),_t=l(" module. Crop a random part of the image, resize it, and normalize it with the image mean and standard deviation:"),$a=m(),$(me.$$.fragment),va=m(),V=o("p"),$t=l("Create a preprocessing function that will apply the transforms and return the "),He=o("code"),vt=l("pixel_values"),bt=l(" - the inputs to the model - of the image:"),ba=m(),$(fe.$$.fragment),wa=m(),G=o("p"),wt=l("Use \u{1F917} Dataset\u2019s "),he=o("a"),Ve=o("code"),jt=l("with_transform"),yt=l(" method to apply the transforms over the entire dataset. The transforms are applied on-the-fly when you load an element of the dataset:"),ja=m(),$(ce.$$.fragment),ya=m(),J=o("p"),kt=l("Use "),Te=o("a"),Et=l("DefaultDataCollator"),xt=l(" to create a batch of examples. Unlike other data collators in \u{1F917} Transformers, the DefaultDataCollator does not apply additional preprocessing such as padding."),ka=m(),$(de.$$.fragment),Ea=m(),O=o("h2"),Y=o("a"),Ge=o("span"),$(ue.$$.fragment),Tt=m(),Je=o("span"),At=l("Fine-tune with Trainer"),xa=m(),W=o("p"),Ct=l("Load ViT with "),Ae=o("a"),Pt=l("AutoModelForImageClassification"),Dt=l(". 
Specify the number of labels, and pass the model the mapping between label number and label class:"),Ta=m(),$(ge.$$.fragment),Aa=m(),$(K.$$.fragment),Ca=m(),Ce=o("p"),qt=l("At this point, only three steps remain:"),Pa=m(),I=o("ol"),T=o("li"),Ft=l("Define your training hyperparameters in "),Pe=o("a"),It=l("TrainingArguments"),zt=l(". It is important you don\u2019t remove unused columns because this will drop the "),Ye=o("code"),St=l("image"),Lt=l(" column. Without the "),We=o("code"),Nt=l("image"),Ot=l(" column, you can\u2019t create "),Ke=o("code"),Rt=l("pixel_values"),Mt=l(". Set "),Qe=o("code"),Ut=l("remove_unused_columns=False"),Bt=l(" to prevent this behavior!"),Ht=m(),_e=o("li"),Vt=l("Pass the training arguments to "),De=o("a"),Gt=l("Trainer"),Jt=l(" along with the model, datasets, tokenizer, and data collator."),Yt=m(),$e=o("li"),Wt=l("Call "),qe=o("a"),Kt=l("train()"),Qt=l(" to fine-tune your model."),Da=m(),$(ve.$$.fragment),qa=m(),$(Q.$$.fragment),this.h()},l(e){const s=Ns('[data-svelte="svelte-1phssyn"]',document.head);h=n(s,"META",{name:!0,content:!0}),s.forEach(a),k=f(e),d=n(e,"H1",{class:!0});var be=i(d);g=n(be,"A",{id:!0,class:!0,href:!0});var Xe=i(g);E=n(Xe,"SPAN",{});var Ze=i(E);v(u.$$.fragment,Ze),Ze.forEach(a),Xe.forEach(a),_=f(be),x=n(be,"SPAN",{});var Zt=i(x);S=r(Zt,"Image classification"),Zt.forEach(a),be.forEach(a),A=f(e),v(D.$$.fragment,e),X=f(e),we=n(e,"P",{});var es=i(we);Ga=r(es,"Image classification assigns a label or class to an image. Unlike text or audio classification, the inputs are the pixel values that represent an image. 
There are many uses for image classification, like detecting damage after a disaster, monitoring crop health, or helping screen medical images for signs of disease."),es.forEach(a),ea=f(e),q=n(e,"P",{});var Fe=i(q);Ja=r(Fe,"This guide will show you how to fine-tune "),Z=n(Fe,"A",{href:!0,rel:!0});var as=i(Z);Ya=r(as,"ViT"),as.forEach(a),Wa=r(Fe," on the "),ee=n(Fe,"A",{href:!0,rel:!0});var ts=i(ee);Ka=r(ts,"Food-101"),ts.forEach(a),Qa=r(Fe," dataset to classify a food item in an image."),Fe.forEach(a),aa=f(e),v(R.$$.fragment,e),ta=f(e),L=n(e,"H2",{class:!0});var Ia=i(L);M=n(Ia,"A",{id:!0,class:!0,href:!0});var ss=i(M);Se=n(ss,"SPAN",{});var ls=i(Se);v(ae.$$.fragment,ls),ls.forEach(a),ss.forEach(a),Xa=f(Ia),Le=n(Ia,"SPAN",{});var rs=i(Le);Za=r(rs,"Load Food-101 dataset"),rs.forEach(a),Ia.forEach(a),sa=f(e),je=n(e,"P",{});var os=i(je);et=r(os,"Load only the first 5000 images of the Food-101 dataset from the \u{1F917} Datasets library since it is pretty large:"),os.forEach(a),la=f(e),v(te.$$.fragment,e),ra=f(e),ye=n(e,"P",{});var ns=i(ye);at=r(ns,"Split this dataset into a train and test set:"),ns.forEach(a),oa=f(e),v(se.$$.fragment,e),na=f(e),ke=n(e,"P",{});var is=i(ke);tt=r(is,"Then take a look at an example:"),is.forEach(a),ia=f(e),v(le.$$.fragment,e),pa=f(e),F=n(e,"P",{});var Ie=i(F);st=r(Ie,"The "),Ne=n(Ie,"CODE",{});var ps=i(Ne);lt=r(ps,"image"),ps.forEach(a),rt=r(Ie," field contains a PIL image, and each "),Oe=n(Ie,"CODE",{});var ms=i(Oe);ot=r(ms,"label"),ms.forEach(a),nt=r(Ie," is an integer that represents a class. Create a dictionary that maps a label name to an integer and vice versa. 
The mapping will help the model recover the label name from the label number:"),Ie.forEach(a),ma=f(e),v(re.$$.fragment,e),fa=f(e),Ee=n(e,"P",{});var fs=i(Ee);it=r(fs,"Now you can convert the label number to a label name for more information:"),fs.forEach(a),ha=f(e),v(oe.$$.fragment,e),ca=f(e),U=n(e,"P",{});var za=i(U);pt=r(za,"Each food class - or label - corresponds to a number; "),Re=n(za,"CODE",{});var hs=i(Re);mt=r(hs,"79"),hs.forEach(a),ft=r(za," indicates a prime rib in the example above."),za.forEach(a),da=f(e),N=n(e,"H2",{class:!0});var Sa=i(N);B=n(Sa,"A",{id:!0,class:!0,href:!0});var cs=i(B);Me=n(cs,"SPAN",{});var ds=i(Me);v(ne.$$.fragment,ds),ds.forEach(a),cs.forEach(a),ht=f(Sa),Ue=n(Sa,"SPAN",{});var us=i(Ue);ct=r(us,"Preprocess"),us.forEach(a),Sa.forEach(a),ua=f(e),xe=n(e,"P",{});var gs=i(xe);dt=r(gs,"Load the ViT feature extractor to process the image into a tensor:"),gs.forEach(a),ga=f(e),v(ie.$$.fragment,e),_a=f(e),H=n(e,"P",{});var La=i(H);ut=r(La,"Apply several image transformations to the dataset to make the model more robust against overfitting. Here you\u2019ll use torchvision\u2019s "),pe=n(La,"A",{href:!0,rel:!0});var _s=i(pe);Be=n(_s,"CODE",{});var $s=i(Be);gt=r($s,"transforms"),$s.forEach(a),_s.forEach(a),_t=r(La," module. Crop a random part of the image, resize it, and normalize it with the image mean and standard deviation:"),La.forEach(a),$a=f(e),v(me.$$.fragment,e),va=f(e),V=n(e,"P",{});var Na=i(V);$t=r(Na,"Create a preprocessing function that will apply the transforms and return the "),He=n(Na,"CODE",{});var vs=i(He);vt=r(vs,"pixel_values"),vs.forEach(a),bt=r(Na," - the inputs to the model - of the image:"),Na.forEach(a),ba=f(e),v(fe.$$.fragment,e),wa=f(e),G=n(e,"P",{});var Oa=i(G);wt=r(Oa,"Use \u{1F917} Dataset\u2019s "),he=n(Oa,"A",{href:!0,rel:!0});var bs=i(he);Ve=n(bs,"CODE",{});var ws=i(Ve);jt=r(ws,"with_transform"),ws.forEach(a),bs.forEach(a),yt=r(Oa," method to apply the transforms over the entire dataset. 
The transforms are applied on-the-fly when you load an element of the dataset:"),Oa.forEach(a),ja=f(e),v(ce.$$.fragment,e),ya=f(e),J=n(e,"P",{});var Ra=i(J);kt=r(Ra,"Use "),Te=n(Ra,"A",{href:!0});var js=i(Te);Et=r(js,"DefaultDataCollator"),js.forEach(a),xt=r(Ra," to create a batch of examples. Unlike other data collators in \u{1F917} Transformers, the DefaultDataCollator does not apply additional preprocessing such as padding."),Ra.forEach(a),ka=f(e),v(de.$$.fragment,e),Ea=f(e),O=n(e,"H2",{class:!0});var Ma=i(O);Y=n(Ma,"A",{id:!0,class:!0,href:!0});var ys=i(Y);Ge=n(ys,"SPAN",{});var ks=i(Ge);v(ue.$$.fragment,ks),ks.forEach(a),ys.forEach(a),Tt=f(Ma),Je=n(Ma,"SPAN",{});var Es=i(Je);At=r(Es,"Fine-tune with Trainer"),Es.forEach(a),Ma.forEach(a),xa=f(e),W=n(e,"P",{});var Ua=i(W);Ct=r(Ua,"Load ViT with "),Ae=n(Ua,"A",{href:!0});var xs=i(Ae);Pt=r(xs,"AutoModelForImageClassification"),xs.forEach(a),Dt=r(Ua,". Specify the number of labels, and pass the model the mapping between label number and label class:"),Ua.forEach(a),Ta=f(e),v(ge.$$.fragment,e),Aa=f(e),v(K.$$.fragment,e),Ca=f(e),Ce=n(e,"P",{});var Ts=i(Ce);qt=r(Ts,"At this point, only three steps remain:"),Ts.forEach(a),Pa=f(e),I=n(e,"OL",{});var ze=i(I);T=n(ze,"LI",{});var C=i(T);Ft=r(C,"Define your training hyperparameters in "),Pe=n(C,"A",{href:!0});var As=i(Pe);It=r(As,"TrainingArguments"),As.forEach(a),zt=r(C,". It is important you don\u2019t remove unused columns because this will drop the "),Ye=n(C,"CODE",{});var Cs=i(Ye);St=r(Cs,"image"),Cs.forEach(a),Lt=r(C," column. Without the "),We=n(C,"CODE",{});var Ps=i(We);Nt=r(Ps,"image"),Ps.forEach(a),Ot=r(C," column, you can\u2019t create "),Ke=n(C,"CODE",{});var Ds=i(Ke);Rt=r(Ds,"pixel_values"),Ds.forEach(a),Mt=r(C,". 
Set "),Qe=n(C,"CODE",{});var qs=i(Qe);Ut=r(qs,"remove_unused_columns=False"),qs.forEach(a),Bt=r(C," to prevent this behavior!"),C.forEach(a),Ht=f(ze),_e=n(ze,"LI",{});var Ba=i(_e);Vt=r(Ba,"Pass the training arguments to "),De=n(Ba,"A",{href:!0});var Fs=i(De);Gt=r(Fs,"Trainer"),Fs.forEach(a),Jt=r(Ba," along with the model, datasets, tokenizer, and data collator."),Ba.forEach(a),Yt=f(ze),$e=n(ze,"LI",{});var Ha=i($e);Wt=r(Ha,"Call "),qe=n(Ha,"A",{href:!0});var Is=i(qe);Kt=r(Is,"train()"),Is.forEach(a),Qt=r(Ha," to fine-tune your model."),Ha.forEach(a),ze.forEach(a),Da=f(e),v(ve.$$.fragment,e),qa=f(e),v(Q.$$.fragment,e),this.h()},h(){c(h,"name","hf:doc:metadata"),c(h,"content",JSON.stringify(Hs)),c(g,"id","image-classification"),c(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(g,"href","#image-classification"),c(d,"class","relative group"),c(Z,"href","https://huggingface.co/docs/transformers/v4.16.2/en/model_doc/vit"),c(Z,"rel","nofollow"),c(ee,"href","https://huggingface.co/datasets/food101"),c(ee,"rel","nofollow"),c(M,"id","load-food101-dataset"),c(M,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(M,"href","#load-food101-dataset"),c(L,"class","relative group"),c(B,"id","preprocess"),c(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(B,"href","#preprocess"),c(N,"class","relative 
group"),c(pe,"href","https://pytorch.org/vision/stable/transforms.html"),c(pe,"rel","nofollow"),c(he,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html?#datasets.Dataset.with_transform"),c(he,"rel","nofollow"),c(Te,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DefaultDataCollator"),c(Y,"id","finetune-with-trainer"),c(Y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Y,"href","#finetune-with-trainer"),c(O,"class","relative group"),c(Ae,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForImageClassification"),c(Pe,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),c(De,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(qe,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train")},m(e,s){t(document.head,h),p(e,k,s),p(e,d,s),t(d,g),t(g,E),b(u,E,null),t(d,_),t(d,x),t(x,S),p(e,A,s),b(D,e,s),p(e,X,s),p(e,we,s),t(we,Ga),p(e,ea,s),p(e,q,s),t(q,Ja),t(q,Z),t(Z,Ya),t(q,Wa),t(q,ee),t(ee,Ka),t(q,Qa),p(e,aa,s),b(R,e,s),p(e,ta,s),p(e,L,s),t(L,M),t(M,Se),b(ae,Se,null),t(L,Xa),t(L,Le),t(Le,Za),p(e,sa,s),p(e,je,s),t(je,et),p(e,la,s),b(te,e,s),p(e,ra,s),p(e,ye,s),t(ye,at),p(e,oa,s),b(se,e,s),p(e,na,s),p(e,ke,s),t(ke,tt),p(e,ia,s),b(le,e,s),p(e,pa,s),p(e,F,s),t(F,st),t(F,Ne),t(Ne,lt),t(F,rt),t(F,Oe),t(Oe,ot),t(F,nt),p(e,ma,s),b(re,e,s),p(e,fa,s),p(e,Ee,s),t(Ee,it),p(e,ha,s),b(oe,e,s),p(e,ca,s),p(e,U,s),t(U,pt),t(U,Re),t(Re,mt),t(U,ft),p(e,da,s),p(e,N,s),t(N,B),t(B,Me),b(ne,Me,null),t(N,ht),t(N,Ue),t(Ue,ct),p(e,ua,s),p(e,xe,s),t(xe,dt),p(e,ga,s),b(ie,e,s),p(e,_a,s),p(e,H,s),t(H,ut),t(H,pe),t(pe,Be),t(Be,gt),t(H,_t),p(e,$a,s),b(me,e,s),p(e,va,s),p(e,V,s),t(V,$t),t(V,He),t(He,vt),t(V,bt),p(e,ba,s),b(fe,e,s),p(e,wa,s),p(e,G,s),t(G,wt),t(G,he),t(he,Ve),t(Ve,jt),t(G,yt),p(
e,ja,s),b(ce,e,s),p(e,ya,s),p(e,J,s),t(J,kt),t(J,Te),t(Te,Et),t(J,xt),p(e,ka,s),b(de,e,s),p(e,Ea,s),p(e,O,s),t(O,Y),t(Y,Ge),b(ue,Ge,null),t(O,Tt),t(O,Je),t(Je,At),p(e,xa,s),p(e,W,s),t(W,Ct),t(W,Ae),t(Ae,Pt),t(W,Dt),p(e,Ta,s),b(ge,e,s),p(e,Aa,s),b(K,e,s),p(e,Ca,s),p(e,Ce,s),t(Ce,qt),p(e,Pa,s),p(e,I,s),t(I,T),t(T,Ft),t(T,Pe),t(Pe,It),t(T,zt),t(T,Ye),t(Ye,St),t(T,Lt),t(T,We),t(We,Nt),t(T,Ot),t(T,Ke),t(Ke,Rt),t(T,Mt),t(T,Qe),t(Qe,Ut),t(T,Bt),t(I,Ht),t(I,_e),t(_e,Vt),t(_e,De),t(De,Gt),t(_e,Jt),t(I,Yt),t(I,$e),t($e,Wt),t($e,qe),t(qe,Kt),t($e,Qt),p(e,Da,s),b(ve,e,s),p(e,qa,s),b(Q,e,s),Fa=!0},p(e,[s]){const be={};s&2&&(be.$$scope={dirty:s,ctx:e}),R.$set(be);const Xe={};s&2&&(Xe.$$scope={dirty:s,ctx:e}),K.$set(Xe);const Ze={};s&2&&(Ze.$$scope={dirty:s,ctx:e}),Q.$set(Ze)},i(e){Fa||(w(u.$$.fragment,e),w(D.$$.fragment,e),w(R.$$.fragment,e),w(ae.$$.fragment,e),w(te.$$.fragment,e),w(se.$$.fragment,e),w(le.$$.fragment,e),w(re.$$.fragment,e),w(oe.$$.fragment,e),w(ne.$$.fragment,e),w(ie.$$.fragment,e),w(me.$$.fragment,e),w(fe.$$.fragment,e),w(ce.$$.fragment,e),w(de.$$.fragment,e),w(ue.$$.fragment,e),w(ge.$$.fragment,e),w(K.$$.fragment,e),w(ve.$$.fragment,e),w(Q.$$.fragment,e),Fa=!0)},o(e){j(u.$$.fragment,e),j(D.$$.fragment,e),j(R.$$.fragment,e),j(ae.$$.fragment,e),j(te.$$.fragment,e),j(se.$$.fragment,e),j(le.$$.fragment,e),j(re.$$.fragment,e),j(oe.$$.fragment,e),j(ne.$$.fragment,e),j(ie.$$.fragment,e),j(me.$$.fragment,e),j(fe.$$.fragment,e),j(ce.$$.fragment,e),j(de.$$.fragment,e),j(ue.$$.fragment,e),j(ge.$$.fragment,e),j(K.$$.fragment,e),j(ve.$$.fragment,e),j(Q.$$.fragment,e),Fa=!1},d(e){a(h),e&&a(k),e&&a(d),y(u),e&&a(A),y(D,e),e&&a(X),e&&a(we),e&&a(ea),e&&a(q),e&&a(aa),y(R,e),e&&a(ta),e&&a(L),y(ae),e&&a(sa),e&&a(je),e&&a(la),y(te,e),e&&a(ra),e&&a(ye),e&&a(oa),y(se,e),e&&a(na),e&&a(ke),e&&a(ia),y(le,e),e&&a(pa),e&&a(F),e&&a(ma),y(re,e),e&&a(fa),e&&a(Ee),e&&a(ha),y(oe,e),e&&a(ca),e&&a(U),e&&a(da),e&&a(N),y(ne),e&&a(ua),e&&a(xe),e&&a(ga),y(ie,e),e&&a(_a),e&&a(H),e&&a($a),y(me,e),e&&a(
va),e&&a(V),e&&a(ba),y(fe,e),e&&a(wa),e&&a(G),e&&a(ja),y(ce,e),e&&a(ya),e&&a(J),e&&a(ka),y(de,e),e&&a(Ea),e&&a(O),y(ue),e&&a(xa),e&&a(W),e&&a(Ta),y(ge,e),e&&a(Aa),y(K,e),e&&a(Ca),e&&a(Ce),e&&a(Pa),e&&a(I),e&&a(Da),y(ve,e),e&&a(qa),y(Q,e)}}}const Hs={local:"image-classification",sections:[{local:"load-food101-dataset",title:"Load Food-101 dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-trainer",title:"Fine-tune with Trainer"}],title:"Image classification"};function Vs(z,h,k){let{fw:d}=h;return z.$$set=g=>{"fw"in g&&k(0,d=g.fw)},[d]}class Xs extends zs{constructor(h){super();Ss(this,h,Vs,Bs,Ls,{fw:0})}}export{Xs as default,Hs as metadata};
411
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/tasks/question_answering.mdx-8babb2cc.js
import{S as No,i as Ro,s as Uo,e as r,k as h,w,t as a,M as Ho,c as l,d as e,m as f,a as i,x as j,h as n,b as c,F as t,g as p,y as $,q as v,o as x,B as q}from"../../chunks/vendor-4833417e.js";import{T as He}from"../../chunks/Tip-fffd6df1.js";import{Y as Bo}from"../../chunks/Youtube-27813aed.js";import{I as Nt}from"../../chunks/IconCopyLink-4b81c553.js";import{C}from"../../chunks/CodeBlock-6a3d1b46.js";import{C as Vo}from"../../chunks/CodeBlockFw-27a176a0.js";import"../../chunks/CopyButton-dacfbfaf.js";function Yo(F){let u,b,d,_,y;return{c(){u=r("p"),b=a("See the question answering "),d=r("a"),_=a("task page"),y=a(" for more information about other forms of question answering and their associated models, datasets, and metrics."),this.h()},l(m){u=l(m,"P",{});var g=i(u);b=n(g,"See the question answering "),d=l(g,"A",{href:!0,rel:!0});var E=i(d);_=n(E,"task page"),E.forEach(e),y=n(g," for more information about other forms of question answering and their associated models, datasets, and metrics."),g.forEach(e),this.h()},h(){c(d,"href","https://huggingface.co/tasks/question-answering"),c(d,"rel","nofollow")},m(m,g){p(m,u,g),t(u,b),t(u,d),t(d,_),t(u,y)},d(m){m&&e(u)}}}function Jo(F){let u,b,d,_,y,m,g,E;return{c(){u=r("p"),b=a("If you aren\u2019t familiar with fine-tuning a model with the "),d=r("a"),_=a("Trainer"),y=a(", take a look at the basic tutorial "),m=r("a"),g=a("here"),E=a("!"),this.h()},l(A){u=l(A,"P",{});var k=i(u);b=n(k,"If you aren\u2019t familiar with fine-tuning a model with the "),d=l(k,"A",{href:!0});var T=i(d);_=n(T,"Trainer"),T.forEach(e),y=n(k,", take a look at the basic tutorial "),m=l(k,"A",{href:!0});var O=i(m);g=n(O,"here"),O.forEach(e),E=n(k,"!"),k.forEach(e),this.h()},h(){c(d,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(m,"href","training#finetune-with-trainer")},m(A,k){p(A,u,k),t(u,b),t(u,d),t(d,_),t(u,y),t(u,m),t(m,g),t(u,E)},d(A){A&&e(u)}}}function Go(F){let u,b,d,_,y;return{c(){u=r("p"),b=a("If you 
aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),d=r("a"),_=a("here"),y=a("!"),this.h()},l(m){u=l(m,"P",{});var g=i(u);b=n(g,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),d=l(g,"A",{href:!0});var E=i(d);_=n(E,"here"),E.forEach(e),y=n(g,"!"),g.forEach(e),this.h()},h(){c(d,"href","training#finetune-with-keras")},m(m,g){p(m,u,g),t(u,b),t(u,d),t(d,_),t(u,y)},d(m){m&&e(u)}}}function Ko(F){let u,b,d,_,y,m,g,E;return{c(){u=r("p"),b=a(`For a more in-depth example of how to fine-tune a model for question answering, take a look at the corresponding `),d=r("a"),_=a("PyTorch notebook"),y=a(` or `),m=r("a"),g=a("TensorFlow notebook"),E=a("."),this.h()},l(A){u=l(A,"P",{});var k=i(u);b=n(k,`For a more in-depth example of how to fine-tune a model for question answering, take a look at the corresponding `),d=l(k,"A",{href:!0,rel:!0});var T=i(d);_=n(T,"PyTorch notebook"),T.forEach(e),y=n(k,` or `),m=l(k,"A",{href:!0,rel:!0});var O=i(m);g=n(O,"TensorFlow notebook"),O.forEach(e),E=n(k,"."),k.forEach(e),this.h()},h(){c(d,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb"),c(d,"rel","nofollow"),c(m,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering-tf.ipynb"),c(m,"rel","nofollow")},m(A,k){p(A,u,k),t(u,b),t(u,d),t(d,_),t(u,y),t(u,m),t(m,g),t(u,E)},d(A){A&&e(u)}}}function Wo(F){let 
u,b,d,_,y,m,g,E,A,k,T,O,Bs,Ve,Rt,J,lt,Ye,Je,it,Ge,Ut,Q,Ke,cs,We,Xe,us,Ze,sa,Ht,G,Vt,R,K,pt,ds,ta,ht,ea,Yt,Ns,aa,Jt,ms,Gt,Rs,na,Kt,_s,Wt,L,oa,ft,ra,la,ct,ia,pa,Xt,U,W,ut,gs,ha,dt,fa,Zt,ws,se,M,ca,mt,ua,da,_t,ma,_a,te,js,ee,Us,ga,ae,I,P,wa,gt,ja,$a,wt,va,xa,jt,qa,ba,ka,H,ya,$t,Ea,Aa,vt,Ta,Da,za,S,Ca,$s,xt,Fa,Pa,qt,Sa,Oa,bt,Qa,La,ne,X,Ma,kt,Ia,Ba,oe,vs,re,D,Na,xs,yt,Ra,Ua,Et,Ha,Va,At,Ya,Ja,le,qs,ie,B,Ga,Hs,Ka,Wa,Tt,Xa,Za,pe,bs,he,V,Z,Dt,ks,sn,zt,tn,fe,ss,en,Vs,an,nn,ce,ys,ue,ts,de,Ys,on,me,N,Es,rn,Js,ln,pn,hn,As,fn,Gs,cn,un,dn,Ts,mn,Ks,_n,gn,_e,Ds,ge,Y,es,Ct,zs,wn,Ft,jn,we,Ws,$n,je,as,$e,z,vn,Pt,xn,qn,Cs,St,bn,kn,Ot,yn,En,ve,Fs,xe,Xs,An,qe,Ps,be,ns,Tn,Zs,Dn,zn,ke,Ss,ye,os,Cn,Os,Qt,Fn,Pn,Ee,Qs,Ae,rs,Sn,Ls,Lt,On,Qn,Te,Ms,De,ls,ze;return m=new Nt({}),T=new Bo({props:{id:"ajPx5LwJD-I"}}),G=new He({props:{$$slots:{default:[Yo]},$$scope:{ctx:F}}}),ds=new Nt({}),ms=new C({props:{code:`from datasets import load_dataset squad = load_dataset("squad")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>squad = load_dataset(<span class="hljs-string">&quot;squad&quot;</span>)`}}),_s=new C({props:{code:'squad["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>squad[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;answers&#x27;</span>: {<span class="hljs-string">&#x27;answer_start&#x27;</span>: [<span class="hljs-number">515</span>], <span class="hljs-string">&#x27;text&#x27;</span>: [<span class="hljs-string">&#x27;Saint Bernadette Soubirous&#x27;</span>]}, <span class="hljs-string">&#x27;context&#x27;</span>: <span class="hljs-string">&#x27;Architecturally, the school has a Catholic character. Atop the Main Building\\&#x27;s gold dome is a golden statue of the Virgin Mary. 
Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend &quot;Venite Ad Me Omnes&quot;. Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.&#x27;</span>, <span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;5733be284776f41900661182&#x27;</span>, <span class="hljs-string">&#x27;question&#x27;</span>: <span class="hljs-string">&#x27;To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;University_of_Notre_Dame&#x27;</span> }`}}),gs=new Nt({}),ws=new Bo({props:{id:"qgaM0weJHpA"}}),js=new C({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),vs=new C({props:{code:`def preprocess_function(examples): questions = [q.strip() for q in examples["question"]] inputs = tokenizer( questions, examples["context"], max_length=384, truncation="only_second", return_offsets_mapping=True, padding="max_length", ) offset_mapping = inputs.pop("offset_mapping") answers = examples["answers"] start_positions = [] end_positions = [] for i, offset in enumerate(offset_mapping): answer = answers[i] start_char = answer["answer_start"][0] 
end_char = answer["answer_start"][0] + len(answer["text"][0]) sequence_ids = inputs.sequence_ids(i) # Find the start and end of the context idx = 0 while sequence_ids[idx] != 1: idx += 1 context_start = idx while sequence_ids[idx] == 1: idx += 1 context_end = idx - 1 # If the answer is not fully inside the context, label it (0, 0) if offset[context_start][0] > end_char or offset[context_end][1] < start_char: start_positions.append(0) end_positions.append(0) else: # Otherwise it's the start and end token positions idx = context_start while idx <= context_end and offset[idx][0] <= start_char: idx += 1 start_positions.append(idx - 1) idx = context_end while idx >= context_start and offset[idx][1] >= end_char: idx -= 1 end_positions.append(idx + 1) inputs["start_positions"] = start_positions inputs["end_positions"] = end_positions return inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> questions = [q.strip() <span class="hljs-keyword">for</span> q <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;question&quot;</span>]] <span class="hljs-meta">... </span> inputs = tokenizer( <span class="hljs-meta">... </span> questions, <span class="hljs-meta">... </span> examples[<span class="hljs-string">&quot;context&quot;</span>], <span class="hljs-meta">... </span> max_length=<span class="hljs-number">384</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-string">&quot;only_second&quot;</span>, <span class="hljs-meta">... </span> return_offsets_mapping=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> padding=<span class="hljs-string">&quot;max_length&quot;</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... 
</span> offset_mapping = inputs.pop(<span class="hljs-string">&quot;offset_mapping&quot;</span>) <span class="hljs-meta">... </span> answers = examples[<span class="hljs-string">&quot;answers&quot;</span>] <span class="hljs-meta">... </span> start_positions = [] <span class="hljs-meta">... </span> end_positions = [] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> i, offset <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(offset_mapping): <span class="hljs-meta">... </span> answer = answers[i] <span class="hljs-meta">... </span> start_char = answer[<span class="hljs-string">&quot;answer_start&quot;</span>][<span class="hljs-number">0</span>] <span class="hljs-meta">... </span> end_char = answer[<span class="hljs-string">&quot;answer_start&quot;</span>][<span class="hljs-number">0</span>] + <span class="hljs-built_in">len</span>(answer[<span class="hljs-string">&quot;text&quot;</span>][<span class="hljs-number">0</span>]) <span class="hljs-meta">... </span> sequence_ids = inputs.sequence_ids(i) <span class="hljs-meta">... </span> <span class="hljs-comment"># Find the start and end of the context</span> <span class="hljs-meta">... </span> idx = <span class="hljs-number">0</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">while</span> sequence_ids[idx] != <span class="hljs-number">1</span>: <span class="hljs-meta">... </span> idx += <span class="hljs-number">1</span> <span class="hljs-meta">... </span> context_start = idx <span class="hljs-meta">... </span> <span class="hljs-keyword">while</span> sequence_ids[idx] == <span class="hljs-number">1</span>: <span class="hljs-meta">... </span> idx += <span class="hljs-number">1</span> <span class="hljs-meta">... </span> context_end = idx - <span class="hljs-number">1</span> <span class="hljs-meta">... </span> <span class="hljs-comment"># If the answer is not fully inside the context, label it (0, 0)</span> <span class="hljs-meta">... 
</span> <span class="hljs-keyword">if</span> offset[context_start][<span class="hljs-number">0</span>] &gt; end_char <span class="hljs-keyword">or</span> offset[context_end][<span class="hljs-number">1</span>] &lt; start_char: <span class="hljs-meta">... </span> start_positions.append(<span class="hljs-number">0</span>) <span class="hljs-meta">... </span> end_positions.append(<span class="hljs-number">0</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> <span class="hljs-comment"># Otherwise it&#x27;s the start and end token positions</span> <span class="hljs-meta">... </span> idx = context_start <span class="hljs-meta">... </span> <span class="hljs-keyword">while</span> idx &lt;= context_end <span class="hljs-keyword">and</span> offset[idx][<span class="hljs-number">0</span>] &lt;= start_char: <span class="hljs-meta">... </span> idx += <span class="hljs-number">1</span> <span class="hljs-meta">... </span> start_positions.append(idx - <span class="hljs-number">1</span>) <span class="hljs-meta">... </span> idx = context_end <span class="hljs-meta">... </span> <span class="hljs-keyword">while</span> idx &gt;= context_start <span class="hljs-keyword">and</span> offset[idx][<span class="hljs-number">1</span>] &gt;= end_char: <span class="hljs-meta">... </span> idx -= <span class="hljs-number">1</span> <span class="hljs-meta">... </span> end_positions.append(idx + <span class="hljs-number">1</span>) <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;start_positions&quot;</span>] = start_positions <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;end_positions&quot;</span>] = end_positions <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> inputs`}}),qs=new C({props:{code:'tokenized_squad = squad.map(preprocess_function, batched=True, remove_columns=squad["train"].column_names)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_squad = squad.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>, remove_columns=squad[<span class="hljs-string">&quot;train&quot;</span>].column_names)'}}),bs=new Vo({props:{group1:{id:"pt",code:`from transformers import DefaultDataCollator data_collator = DefaultDataCollator()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator()`},group2:{id:"tf",code:`from transformers import DefaultDataCollator data_collator = DefaultDataCollator(return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator(return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}}),ks=new Nt({}),ys=new C({props:{code:`from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer model = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForQuestionAnswering, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),ts=new He({props:{$$slots:{default:[Jo]},$$scope:{ctx:F}}}),Ds=new C({props:{code:`training_args = TrainingArguments( 
output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=3, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_squad["train"], eval_dataset=tokenized_squad["validation"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_squad[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),zs=new Nt({}),as=new He({props:{$$slots:{default:[Go]},$$scope:{ctx:F}}}),Fs=new C({props:{code:`tf_train_set = tokenized_squad["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "start_positions", "end_positions"], dummy_labels=True, shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_validation_set = tokenized_squad["validation"].to_tf_dataset( columns=["attention_mask", "input_ids", "start_positions", "end_positions"], dummy_labels=True, shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;start_positions&quot;</span>, <span class="hljs-string">&quot;end_positions&quot;</span>], <span class="hljs-meta">... </span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = tokenized_squad[<span class="hljs-string">&quot;validation&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;start_positions&quot;</span>, <span class="hljs-string">&quot;end_positions&quot;</span>], <span class="hljs-meta">... </span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... 
</span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)`}}),Ps=new C({props:{code:`from transformers import create_optimizer batch_size = 16 num_epochs = 2 total_train_steps = (len(tokenized_squad["train"]) // batch_size) * num_epochs optimizer, schedule = create_optimizer( init_lr=2e-5, num_warmup_steps=0, num_train_steps=total_train_steps, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = (<span class="hljs-built_in">len</span>(tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer( <span class="hljs-meta">... </span> init_lr=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_warmup_steps=<span class="hljs-number">0</span>, <span class="hljs-meta">... </span> num_train_steps=total_train_steps, <span class="hljs-meta">... 
</span>)`}}),Ss=new C({props:{code:`from transformers import TFAutoModelForQuestionAnswering model = TFAutoModelForQuestionAnswering("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForQuestionAnswering(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),Qs=new C({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),Ms=new C({props:{code:"model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)'}}),ls=new He({props:{$$slots:{default:[Ko]},$$scope:{ctx:F}}}),{c(){u=r("meta"),b=h(),d=r("h1"),_=r("a"),y=r("span"),w(m.$$.fragment),g=h(),E=r("span"),A=a("Question answering"),k=h(),w(T.$$.fragment),O=h(),Bs=r("p"),Ve=a("Question answering tasks return an answer given a question. 
There are two common forms of question answering:"),Rt=h(),J=r("ul"),lt=r("li"),Ye=a("Extractive: extract the answer from the given context."),Je=h(),it=r("li"),Ge=a("Abstractive: generate an answer from the context that correctly answers the question."),Ut=h(),Q=r("p"),Ke=a("This guide will show you how to fine-tune "),cs=r("a"),We=a("DistilBERT"),Xe=a(" on the "),us=r("a"),Ze=a("SQuAD"),sa=a(" dataset for extractive question answering."),Ht=h(),w(G.$$.fragment),Vt=h(),R=r("h2"),K=r("a"),pt=r("span"),w(ds.$$.fragment),ta=h(),ht=r("span"),ea=a("Load SQuAD dataset"),Yt=h(),Ns=r("p"),aa=a("Load the SQuAD dataset from the \u{1F917} Datasets library:"),Jt=h(),w(ms.$$.fragment),Gt=h(),Rs=r("p"),na=a("Then take a look at an example:"),Kt=h(),w(_s.$$.fragment),Wt=h(),L=r("p"),oa=a("The "),ft=r("code"),ra=a("answers"),la=a(" field is a dictionary containing the starting position of the answer and the "),ct=r("code"),ia=a("text"),pa=a(" of the answer."),Xt=h(),U=r("h2"),W=r("a"),ut=r("span"),w(gs.$$.fragment),ha=h(),dt=r("span"),fa=a("Preprocess"),Zt=h(),w(ws.$$.fragment),se=h(),M=r("p"),ca=a("Load the DistilBERT tokenizer to process the "),mt=r("code"),ua=a("question"),da=a(" and "),_t=r("code"),ma=a("context"),_a=a(" fields:"),te=h(),w(js.$$.fragment),ee=h(),Us=r("p"),ga=a("There are a few preprocessing steps particular to question answering that you should be aware of:"),ae=h(),I=r("ol"),P=r("li"),wa=a("Some examples in a dataset may have a very long "),gt=r("code"),ja=a("context"),$a=a(" that exceeds the maximum input length of the model. Truncate only the "),wt=r("code"),va=a("context"),xa=a(" by setting "),jt=r("code"),qa=a('truncation="only_second"'),ba=a("."),ka=h(),H=r("li"),ya=a("Next, map the start and end positions of the answer to the original "),$t=r("code"),Ea=a("context"),Aa=a(` by setting `),vt=r("code"),Ta=a("return_offset_mapping=True"),Da=a("."),za=h(),S=r("li"),Ca=a("With the mapping in hand, you can find the start and end tokens of the answer. 
Use the "),$s=r("a"),xt=r("code"),Fa=a("sequence_ids"),Pa=a(` method to find which part of the offset corresponds to the `),qt=r("code"),Sa=a("question"),Oa=a(" and which corresponds to the "),bt=r("code"),Qa=a("context"),La=a("."),ne=h(),X=r("p"),Ma=a("Here is how you can create a function to truncate and map the start and end tokens of the answer to the "),kt=r("code"),Ia=a("context"),Ba=a(":"),oe=h(),w(vs.$$.fragment),re=h(),D=r("p"),Na=a("Use \u{1F917} Datasets "),xs=r("a"),yt=r("code"),Ra=a("map"),Ua=a(" function to apply the preprocessing function over the entire dataset. You can speed up the "),Et=r("code"),Ha=a("map"),Va=a(" function by setting "),At=r("code"),Ya=a("batched=True"),Ja=a(" to process multiple elements of the dataset at once. Remove the columns you don\u2019t need:"),le=h(),w(qs.$$.fragment),ie=h(),B=r("p"),Ga=a("Use "),Hs=r("a"),Ka=a("DefaultDataCollator"),Wa=a(" to create a batch of examples. Unlike other data collators in \u{1F917} Transformers, the "),Tt=r("code"),Xa=a("DefaultDataCollator"),Za=a(" does not apply additional preprocessing such as padding."),pe=h(),w(bs.$$.fragment),he=h(),V=r("h2"),Z=r("a"),Dt=r("span"),w(ks.$$.fragment),sn=h(),zt=r("span"),tn=a("Fine-tune with Trainer"),fe=h(),ss=r("p"),en=a("Load DistilBERT with "),Vs=r("a"),an=a("AutoModelForQuestionAnswering"),nn=a(":"),ce=h(),w(ys.$$.fragment),ue=h(),w(ts.$$.fragment),de=h(),Ys=r("p"),on=a("At this point, only three steps remain:"),me=h(),N=r("ol"),Es=r("li"),rn=a("Define your training hyperparameters in "),Js=r("a"),ln=a("TrainingArguments"),pn=a("."),hn=h(),As=r("li"),fn=a("Pass the training arguments to "),Gs=r("a"),cn=a("Trainer"),un=a(" along with the model, dataset, tokenizer, and data collator."),dn=h(),Ts=r("li"),mn=a("Call "),Ks=r("a"),_n=a("train()"),gn=a(" to fine-tune your model."),_e=h(),w(Ds.$$.fragment),ge=h(),Y=r("h2"),es=r("a"),Ct=r("span"),w(zs.$$.fragment),wn=h(),Ft=r("span"),jn=a("Fine-tune with TensorFlow"),we=h(),Ws=r("p"),$n=a("To fine-tune a 
model in TensorFlow is just as easy, with only a few differences."),je=h(),w(as.$$.fragment),$e=h(),z=r("p"),vn=a("Convert your datasets to the "),Pt=r("code"),xn=a("tf.data.Dataset"),qn=a(" format with "),Cs=r("a"),St=r("code"),bn=a("to_tf_dataset"),kn=a(". Specify inputs and the start and end positions of an answer in "),Ot=r("code"),yn=a("columns"),En=a(", whether to shuffle the dataset order, batch size, and the data collator:"),ve=h(),w(Fs.$$.fragment),xe=h(),Xs=r("p"),An=a("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),qe=h(),w(Ps.$$.fragment),be=h(),ns=r("p"),Tn=a("Load DistilBERT with "),Zs=r("a"),Dn=a("TFAutoModelForQuestionAnswering"),zn=a(":"),ke=h(),w(Ss.$$.fragment),ye=h(),os=r("p"),Cn=a("Configure the model for training with "),Os=r("a"),Qt=r("code"),Fn=a("compile"),Pn=a(":"),Ee=h(),w(Qs.$$.fragment),Ae=h(),rs=r("p"),Sn=a("Call "),Ls=r("a"),Lt=r("code"),On=a("fit"),Qn=a(" to fine-tune the model:"),Te=h(),w(Ms.$$.fragment),De=h(),w(ls.$$.fragment),this.h()},l(s){const o=Ho('[data-svelte="svelte-1phssyn"]',document.head);u=l(o,"META",{name:!0,content:!0}),o.forEach(e),b=f(s),d=l(s,"H1",{class:!0});var Is=i(d);_=l(Is,"A",{id:!0,class:!0,href:!0});var Mt=i(_);y=l(Mt,"SPAN",{});var It=i(y);j(m.$$.fragment,It),It.forEach(e),Mt.forEach(e),g=f(Is),E=l(Is,"SPAN",{});var Bt=i(E);A=n(Bt,"Question answering"),Bt.forEach(e),Is.forEach(e),k=f(s),j(T.$$.fragment,s),O=f(s),Bs=l(s,"P",{});var Ln=i(Bs);Ve=n(Ln,"Question answering tasks return an answer given a question. 
There are two common forms of question answering:"),Ln.forEach(e),Rt=f(s),J=l(s,"UL",{});var Ce=i(J);lt=l(Ce,"LI",{});var Mn=i(lt);Ye=n(Mn,"Extractive: extract the answer from the given context."),Mn.forEach(e),Je=f(Ce),it=l(Ce,"LI",{});var In=i(it);Ge=n(In,"Abstractive: generate an answer from the context that correctly answers the question."),In.forEach(e),Ce.forEach(e),Ut=f(s),Q=l(s,"P",{});var st=i(Q);Ke=n(st,"This guide will show you how to fine-tune "),cs=l(st,"A",{href:!0,rel:!0});var Bn=i(cs);We=n(Bn,"DistilBERT"),Bn.forEach(e),Xe=n(st," on the "),us=l(st,"A",{href:!0,rel:!0});var Nn=i(us);Ze=n(Nn,"SQuAD"),Nn.forEach(e),sa=n(st," dataset for extractive question answering."),st.forEach(e),Ht=f(s),j(G.$$.fragment,s),Vt=f(s),R=l(s,"H2",{class:!0});var Fe=i(R);K=l(Fe,"A",{id:!0,class:!0,href:!0});var Rn=i(K);pt=l(Rn,"SPAN",{});var Un=i(pt);j(ds.$$.fragment,Un),Un.forEach(e),Rn.forEach(e),ta=f(Fe),ht=l(Fe,"SPAN",{});var Hn=i(ht);ea=n(Hn,"Load SQuAD dataset"),Hn.forEach(e),Fe.forEach(e),Yt=f(s),Ns=l(s,"P",{});var Vn=i(Ns);aa=n(Vn,"Load the SQuAD dataset from the \u{1F917} Datasets library:"),Vn.forEach(e),Jt=f(s),j(ms.$$.fragment,s),Gt=f(s),Rs=l(s,"P",{});var Yn=i(Rs);na=n(Yn,"Then take a look at an example:"),Yn.forEach(e),Kt=f(s),j(_s.$$.fragment,s),Wt=f(s),L=l(s,"P",{});var tt=i(L);oa=n(tt,"The "),ft=l(tt,"CODE",{});var Jn=i(ft);ra=n(Jn,"answers"),Jn.forEach(e),la=n(tt," field is a dictionary containing the starting position of the answer and the "),ct=l(tt,"CODE",{});var Gn=i(ct);ia=n(Gn,"text"),Gn.forEach(e),pa=n(tt," of the answer."),tt.forEach(e),Xt=f(s),U=l(s,"H2",{class:!0});var Pe=i(U);W=l(Pe,"A",{id:!0,class:!0,href:!0});var Kn=i(W);ut=l(Kn,"SPAN",{});var Wn=i(ut);j(gs.$$.fragment,Wn),Wn.forEach(e),Kn.forEach(e),ha=f(Pe),dt=l(Pe,"SPAN",{});var Xn=i(dt);fa=n(Xn,"Preprocess"),Xn.forEach(e),Pe.forEach(e),Zt=f(s),j(ws.$$.fragment,s),se=f(s),M=l(s,"P",{});var et=i(M);ca=n(et,"Load the DistilBERT tokenizer to process the "),mt=l(et,"CODE",{});var 
Zn=i(mt);ua=n(Zn,"question"),Zn.forEach(e),da=n(et," and "),_t=l(et,"CODE",{});var so=i(_t);ma=n(so,"context"),so.forEach(e),_a=n(et," fields:"),et.forEach(e),te=f(s),j(js.$$.fragment,s),ee=f(s),Us=l(s,"P",{});var to=i(Us);ga=n(to,"There are a few preprocessing steps particular to question answering that you should be aware of:"),to.forEach(e),ae=f(s),I=l(s,"OL",{});var at=i(I);P=l(at,"LI",{});var is=i(P);wa=n(is,"Some examples in a dataset may have a very long "),gt=l(is,"CODE",{});var eo=i(gt);ja=n(eo,"context"),eo.forEach(e),$a=n(is," that exceeds the maximum input length of the model. Truncate only the "),wt=l(is,"CODE",{});var ao=i(wt);va=n(ao,"context"),ao.forEach(e),xa=n(is," by setting "),jt=l(is,"CODE",{});var no=i(jt);qa=n(no,'truncation="only_second"'),no.forEach(e),ba=n(is,"."),is.forEach(e),ka=f(at),H=l(at,"LI",{});var nt=i(H);ya=n(nt,"Next, map the start and end positions of the answer to the original "),$t=l(nt,"CODE",{});var oo=i($t);Ea=n(oo,"context"),oo.forEach(e),Aa=n(nt,` by setting `),vt=l(nt,"CODE",{});var ro=i(vt);Ta=n(ro,"return_offset_mapping=True"),ro.forEach(e),Da=n(nt,"."),nt.forEach(e),za=f(at),S=l(at,"LI",{});var ps=i(S);Ca=n(ps,"With the mapping in hand, you can find the start and end tokens of the answer. 
Use the "),$s=l(ps,"A",{href:!0,rel:!0});var lo=i($s);xt=l(lo,"CODE",{});var io=i(xt);Fa=n(io,"sequence_ids"),io.forEach(e),lo.forEach(e),Pa=n(ps,` method to find which part of the offset corresponds to the `),qt=l(ps,"CODE",{});var po=i(qt);Sa=n(po,"question"),po.forEach(e),Oa=n(ps," and which corresponds to the "),bt=l(ps,"CODE",{});var ho=i(bt);Qa=n(ho,"context"),ho.forEach(e),La=n(ps,"."),ps.forEach(e),at.forEach(e),ne=f(s),X=l(s,"P",{});var Se=i(X);Ma=n(Se,"Here is how you can create a function to truncate and map the start and end tokens of the answer to the "),kt=l(Se,"CODE",{});var fo=i(kt);Ia=n(fo,"context"),fo.forEach(e),Ba=n(Se,":"),Se.forEach(e),oe=f(s),j(vs.$$.fragment,s),re=f(s),D=l(s,"P",{});var hs=i(D);Na=n(hs,"Use \u{1F917} Datasets "),xs=l(hs,"A",{href:!0,rel:!0});var co=i(xs);yt=l(co,"CODE",{});var uo=i(yt);Ra=n(uo,"map"),uo.forEach(e),co.forEach(e),Ua=n(hs," function to apply the preprocessing function over the entire dataset. You can speed up the "),Et=l(hs,"CODE",{});var mo=i(Et);Ha=n(mo,"map"),mo.forEach(e),Va=n(hs," function by setting "),At=l(hs,"CODE",{});var _o=i(At);Ya=n(_o,"batched=True"),_o.forEach(e),Ja=n(hs," to process multiple elements of the dataset at once. Remove the columns you don\u2019t need:"),hs.forEach(e),le=f(s),j(qs.$$.fragment,s),ie=f(s),B=l(s,"P",{});var ot=i(B);Ga=n(ot,"Use "),Hs=l(ot,"A",{href:!0});var go=i(Hs);Ka=n(go,"DefaultDataCollator"),go.forEach(e),Wa=n(ot," to create a batch of examples. 
Unlike other data collators in \u{1F917} Transformers, the "),Tt=l(ot,"CODE",{});var wo=i(Tt);Xa=n(wo,"DefaultDataCollator"),wo.forEach(e),Za=n(ot," does not apply additional preprocessing such as padding."),ot.forEach(e),pe=f(s),j(bs.$$.fragment,s),he=f(s),V=l(s,"H2",{class:!0});var Oe=i(V);Z=l(Oe,"A",{id:!0,class:!0,href:!0});var jo=i(Z);Dt=l(jo,"SPAN",{});var $o=i(Dt);j(ks.$$.fragment,$o),$o.forEach(e),jo.forEach(e),sn=f(Oe),zt=l(Oe,"SPAN",{});var vo=i(zt);tn=n(vo,"Fine-tune with Trainer"),vo.forEach(e),Oe.forEach(e),fe=f(s),ss=l(s,"P",{});var Qe=i(ss);en=n(Qe,"Load DistilBERT with "),Vs=l(Qe,"A",{href:!0});var xo=i(Vs);an=n(xo,"AutoModelForQuestionAnswering"),xo.forEach(e),nn=n(Qe,":"),Qe.forEach(e),ce=f(s),j(ys.$$.fragment,s),ue=f(s),j(ts.$$.fragment,s),de=f(s),Ys=l(s,"P",{});var qo=i(Ys);on=n(qo,"At this point, only three steps remain:"),qo.forEach(e),me=f(s),N=l(s,"OL",{});var rt=i(N);Es=l(rt,"LI",{});var Le=i(Es);rn=n(Le,"Define your training hyperparameters in "),Js=l(Le,"A",{href:!0});var bo=i(Js);ln=n(bo,"TrainingArguments"),bo.forEach(e),pn=n(Le,"."),Le.forEach(e),hn=f(rt),As=l(rt,"LI",{});var Me=i(As);fn=n(Me,"Pass the training arguments to "),Gs=l(Me,"A",{href:!0});var ko=i(Gs);cn=n(ko,"Trainer"),ko.forEach(e),un=n(Me," along with the model, dataset, tokenizer, and data collator."),Me.forEach(e),dn=f(rt),Ts=l(rt,"LI",{});var Ie=i(Ts);mn=n(Ie,"Call "),Ks=l(Ie,"A",{href:!0});var yo=i(Ks);_n=n(yo,"train()"),yo.forEach(e),gn=n(Ie," to fine-tune your model."),Ie.forEach(e),rt.forEach(e),_e=f(s),j(Ds.$$.fragment,s),ge=f(s),Y=l(s,"H2",{class:!0});var Be=i(Y);es=l(Be,"A",{id:!0,class:!0,href:!0});var Eo=i(es);Ct=l(Eo,"SPAN",{});var Ao=i(Ct);j(zs.$$.fragment,Ao),Ao.forEach(e),Eo.forEach(e),wn=f(Be),Ft=l(Be,"SPAN",{});var To=i(Ft);jn=n(To,"Fine-tune with TensorFlow"),To.forEach(e),Be.forEach(e),we=f(s),Ws=l(s,"P",{});var Do=i(Ws);$n=n(Do,"To fine-tune a model in TensorFlow is just as easy, with only a few 
differences."),Do.forEach(e),je=f(s),j(as.$$.fragment,s),$e=f(s),z=l(s,"P",{});var fs=i(z);vn=n(fs,"Convert your datasets to the "),Pt=l(fs,"CODE",{});var zo=i(Pt);xn=n(zo,"tf.data.Dataset"),zo.forEach(e),qn=n(fs," format with "),Cs=l(fs,"A",{href:!0,rel:!0});var Co=i(Cs);St=l(Co,"CODE",{});var Fo=i(St);bn=n(Fo,"to_tf_dataset"),Fo.forEach(e),Co.forEach(e),kn=n(fs,". Specify inputs and the start and end positions of an answer in "),Ot=l(fs,"CODE",{});var Po=i(Ot);yn=n(Po,"columns"),Po.forEach(e),En=n(fs,", whether to shuffle the dataset order, batch size, and the data collator:"),fs.forEach(e),ve=f(s),j(Fs.$$.fragment,s),xe=f(s),Xs=l(s,"P",{});var So=i(Xs);An=n(So,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),So.forEach(e),qe=f(s),j(Ps.$$.fragment,s),be=f(s),ns=l(s,"P",{});var Ne=i(ns);Tn=n(Ne,"Load DistilBERT with "),Zs=l(Ne,"A",{href:!0});var Oo=i(Zs);Dn=n(Oo,"TFAutoModelForQuestionAnswering"),Oo.forEach(e),zn=n(Ne,":"),Ne.forEach(e),ke=f(s),j(Ss.$$.fragment,s),ye=f(s),os=l(s,"P",{});var Re=i(os);Cn=n(Re,"Configure the model for training with "),Os=l(Re,"A",{href:!0,rel:!0});var Qo=i(Os);Qt=l(Qo,"CODE",{});var Lo=i(Qt);Fn=n(Lo,"compile"),Lo.forEach(e),Qo.forEach(e),Pn=n(Re,":"),Re.forEach(e),Ee=f(s),j(Qs.$$.fragment,s),Ae=f(s),rs=l(s,"P",{});var Ue=i(rs);Sn=n(Ue,"Call "),Ls=l(Ue,"A",{href:!0,rel:!0});var Mo=i(Ls);Lt=l(Mo,"CODE",{});var Io=i(Lt);On=n(Io,"fit"),Io.forEach(e),Mo.forEach(e),Qn=n(Ue," to fine-tune the model:"),Ue.forEach(e),Te=f(s),j(Ms.$$.fragment,s),De=f(s),j(ls.$$.fragment,s),this.h()},h(){c(u,"name","hf:doc:metadata"),c(u,"content",JSON.stringify(Xo)),c(_,"id","question-answering"),c(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(_,"href","#question-answering"),c(d,"class","relative 
group"),c(cs,"href","https://huggingface.co/distilbert-base-uncased"),c(cs,"rel","nofollow"),c(us,"href","https://huggingface.co/datasets/squad"),c(us,"rel","nofollow"),c(K,"id","load-squad-dataset"),c(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(K,"href","#load-squad-dataset"),c(R,"class","relative group"),c(W,"id","preprocess"),c(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(W,"href","#preprocess"),c(U,"class","relative group"),c($s,"href","https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.sequence_ids"),c($s,"rel","nofollow"),c(xs,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map"),c(xs,"rel","nofollow"),c(Hs,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DefaultDataCollator"),c(Z,"id","finetune-with-trainer"),c(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(Z,"href","#finetune-with-trainer"),c(V,"class","relative group"),c(Vs,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForQuestionAnswering"),c(Js,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),c(Gs,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),c(Ks,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train"),c(es,"id","finetune-with-tensorflow"),c(es,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),c(es,"href","#finetune-with-tensorflow"),c(Y,"class","relative group"),c(Cs,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),c(Cs,"rel","nofollow"),c(Zs,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForQuestionAnswering"),c(Os,"href","https://keras.io/api/models/model_training_apis/#compile-method"),c(Os,"rel","nofollow"),c(Ls,"href","https://keras.io/api/models/model_training_apis/#fit-method"),c(Ls,"rel","nofollow")},m(s,o){t(document.head,u),p(s,b,o),p(s,d,o),t(d,_),t(_,y),$(m,y,null),t(d,g),t(d,E),t(E,A),p(s,k,o),$(T,s,o),p(s,O,o),p(s,Bs,o),t(Bs,Ve),p(s,Rt,o),p(s,J,o),t(J,lt),t(lt,Ye),t(J,Je),t(J,it),t(it,Ge),p(s,Ut,o),p(s,Q,o),t(Q,Ke),t(Q,cs),t(cs,We),t(Q,Xe),t(Q,us),t(us,Ze),t(Q,sa),p(s,Ht,o),$(G,s,o),p(s,Vt,o),p(s,R,o),t(R,K),t(K,pt),$(ds,pt,null),t(R,ta),t(R,ht),t(ht,ea),p(s,Yt,o),p(s,Ns,o),t(Ns,aa),p(s,Jt,o),$(ms,s,o),p(s,Gt,o),p(s,Rs,o),t(Rs,na),p(s,Kt,o),$(_s,s,o),p(s,Wt,o),p(s,L,o),t(L,oa),t(L,ft),t(ft,ra),t(L,la),t(L,ct),t(ct,ia),t(L,pa),p(s,Xt,o),p(s,U,o),t(U,W),t(W,ut),$(gs,ut,null),t(U,ha),t(U,dt),t(dt,fa),p(s,Zt,o),$(ws,s,o),p(s,se,o),p(s,M,o),t(M,ca),t(M,mt),t(mt,ua),t(M,da),t(M,_t),t(_t,ma),t(M,_a),p(s,te,o),$(js,s,o),p(s,ee,o),p(s,Us,o),t(Us,ga),p(s,ae,o),p(s,I,o),t(I,P),t(P,wa),t(P,gt),t(gt,ja),t(P,$a),t(P,wt),t(wt,va),t(P,xa),t(P,jt),t(jt,qa),t(P,ba),t(I,ka),t(I,H),t(H,ya),t(H,$t),t($t,Ea),t(H,Aa),t(H,vt),t(vt,Ta),t(H,Da),t(I,za),t(I,S),t(S,Ca),t(S,$s),t($s,xt),t(xt,Fa),t(S,Pa),t(S,qt),t(qt,Sa),t(S,Oa),t(S,bt),t(bt,Qa),t(S,La),p(s,ne,o),p(s,X,o),t(X,Ma),t(X,kt),t(kt,Ia),t(X,Ba),p(s,oe,o),$(vs,s,o),p(s,re,o),p(s,D,o),t(D,Na),t(D,xs),t(xs,yt),t(yt,Ra),t(D,Ua),t(D,Et),t(Et,Ha),t(D,Va),t(D,At),t(At,Ya),t(D,Ja),p(s,le,o),$(qs,s,o),p(s,ie,o),p(s,B,o),t(B,Ga),t(B,Hs),t(Hs,Ka),t(B,Wa),t(B,Tt),t(Tt,Xa),t(B,Za),p(s,pe,o),$(bs,s,o),p(s,he,o),p(s,V,o),t(V,Z),t(Z,Dt),$(ks,Dt,null),t(V,sn),t(V,zt),t(zt,tn),p(s,fe,o),p(s,ss,o),t(ss,en),t(ss
,Vs),t(Vs,an),t(ss,nn),p(s,ce,o),$(ys,s,o),p(s,ue,o),$(ts,s,o),p(s,de,o),p(s,Ys,o),t(Ys,on),p(s,me,o),p(s,N,o),t(N,Es),t(Es,rn),t(Es,Js),t(Js,ln),t(Es,pn),t(N,hn),t(N,As),t(As,fn),t(As,Gs),t(Gs,cn),t(As,un),t(N,dn),t(N,Ts),t(Ts,mn),t(Ts,Ks),t(Ks,_n),t(Ts,gn),p(s,_e,o),$(Ds,s,o),p(s,ge,o),p(s,Y,o),t(Y,es),t(es,Ct),$(zs,Ct,null),t(Y,wn),t(Y,Ft),t(Ft,jn),p(s,we,o),p(s,Ws,o),t(Ws,$n),p(s,je,o),$(as,s,o),p(s,$e,o),p(s,z,o),t(z,vn),t(z,Pt),t(Pt,xn),t(z,qn),t(z,Cs),t(Cs,St),t(St,bn),t(z,kn),t(z,Ot),t(Ot,yn),t(z,En),p(s,ve,o),$(Fs,s,o),p(s,xe,o),p(s,Xs,o),t(Xs,An),p(s,qe,o),$(Ps,s,o),p(s,be,o),p(s,ns,o),t(ns,Tn),t(ns,Zs),t(Zs,Dn),t(ns,zn),p(s,ke,o),$(Ss,s,o),p(s,ye,o),p(s,os,o),t(os,Cn),t(os,Os),t(Os,Qt),t(Qt,Fn),t(os,Pn),p(s,Ee,o),$(Qs,s,o),p(s,Ae,o),p(s,rs,o),t(rs,Sn),t(rs,Ls),t(Ls,Lt),t(Lt,On),t(rs,Qn),p(s,Te,o),$(Ms,s,o),p(s,De,o),$(ls,s,o),ze=!0},p(s,[o]){const Is={};o&2&&(Is.$$scope={dirty:o,ctx:s}),G.$set(Is);const Mt={};o&2&&(Mt.$$scope={dirty:o,ctx:s}),ts.$set(Mt);const It={};o&2&&(It.$$scope={dirty:o,ctx:s}),as.$set(It);const 
Bt={};o&2&&(Bt.$$scope={dirty:o,ctx:s}),ls.$set(Bt)},i(s){ze||(v(m.$$.fragment,s),v(T.$$.fragment,s),v(G.$$.fragment,s),v(ds.$$.fragment,s),v(ms.$$.fragment,s),v(_s.$$.fragment,s),v(gs.$$.fragment,s),v(ws.$$.fragment,s),v(js.$$.fragment,s),v(vs.$$.fragment,s),v(qs.$$.fragment,s),v(bs.$$.fragment,s),v(ks.$$.fragment,s),v(ys.$$.fragment,s),v(ts.$$.fragment,s),v(Ds.$$.fragment,s),v(zs.$$.fragment,s),v(as.$$.fragment,s),v(Fs.$$.fragment,s),v(Ps.$$.fragment,s),v(Ss.$$.fragment,s),v(Qs.$$.fragment,s),v(Ms.$$.fragment,s),v(ls.$$.fragment,s),ze=!0)},o(s){x(m.$$.fragment,s),x(T.$$.fragment,s),x(G.$$.fragment,s),x(ds.$$.fragment,s),x(ms.$$.fragment,s),x(_s.$$.fragment,s),x(gs.$$.fragment,s),x(ws.$$.fragment,s),x(js.$$.fragment,s),x(vs.$$.fragment,s),x(qs.$$.fragment,s),x(bs.$$.fragment,s),x(ks.$$.fragment,s),x(ys.$$.fragment,s),x(ts.$$.fragment,s),x(Ds.$$.fragment,s),x(zs.$$.fragment,s),x(as.$$.fragment,s),x(Fs.$$.fragment,s),x(Ps.$$.fragment,s),x(Ss.$$.fragment,s),x(Qs.$$.fragment,s),x(Ms.$$.fragment,s),x(ls.$$.fragment,s),ze=!1},d(s){e(u),s&&e(b),s&&e(d),q(m),s&&e(k),q(T,s),s&&e(O),s&&e(Bs),s&&e(Rt),s&&e(J),s&&e(Ut),s&&e(Q),s&&e(Ht),q(G,s),s&&e(Vt),s&&e(R),q(ds),s&&e(Yt),s&&e(Ns),s&&e(Jt),q(ms,s),s&&e(Gt),s&&e(Rs),s&&e(Kt),q(_s,s),s&&e(Wt),s&&e(L),s&&e(Xt),s&&e(U),q(gs),s&&e(Zt),q(ws,s),s&&e(se),s&&e(M),s&&e(te),q(js,s),s&&e(ee),s&&e(Us),s&&e(ae),s&&e(I),s&&e(ne),s&&e(X),s&&e(oe),q(vs,s),s&&e(re),s&&e(D),s&&e(le),q(qs,s),s&&e(ie),s&&e(B),s&&e(pe),q(bs,s),s&&e(he),s&&e(V),q(ks),s&&e(fe),s&&e(ss),s&&e(ce),q(ys,s),s&&e(ue),q(ts,s),s&&e(de),s&&e(Ys),s&&e(me),s&&e(N),s&&e(_e),q(Ds,s),s&&e(ge),s&&e(Y),q(zs),s&&e(we),s&&e(Ws),s&&e(je),q(as,s),s&&e($e),s&&e(z),s&&e(ve),q(Fs,s),s&&e(xe),s&&e(Xs),s&&e(qe),q(Ps,s),s&&e(be),s&&e(ns),s&&e(ke),q(Ss,s),s&&e(ye),s&&e(os),s&&e(Ee),q(Qs,s),s&&e(Ae),s&&e(rs),s&&e(Te),q(Ms,s),s&&e(De),q(ls,s)}}}const Xo={local:"question-answering",sections:[{local:"load-squad-dataset",title:"Load SQuAD 
dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-trainer",title:"Fine-tune with Trainer"},{local:"finetune-with-tensorflow",title:"Fine-tune with TensorFlow"}],title:"Question answering"};function Zo(F,u,b){let{fw:d}=u;return F.$$set=_=>{"fw"in _&&b(0,d=_.fw)},[d]}class lr extends No{constructor(u){super();Ro(this,u,Zo,Wo,Uo,{fw:0})}}export{lr as default,Xo as metadata};
412
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/tasks/sequence_classification.mdx-d07399b9.js
import{S as Xo,i as Zo,s as er,e as l,k as h,w as k,t as o,M as tr,c as n,d as a,m as c,a as i,x as y,h as r,b as m,F as t,g as p,y as j,q as E,o as T,B as x}from"../../chunks/vendor-4833417e.js";import{T as xt}from"../../chunks/Tip-fffd6df1.js";import{Y as ar}from"../../chunks/Youtube-27813aed.js";import{I as zt}from"../../chunks/IconCopyLink-4b81c553.js";import{C as F}from"../../chunks/CodeBlock-6a3d1b46.js";import{C as sr}from"../../chunks/CodeBlockFw-27a176a0.js";import"../../chunks/CopyButton-dacfbfaf.js";function or(C){let f,g,d,_,b;return{c(){f=l("p"),g=o("See the text classification "),d=l("a"),_=o("task page"),b=o(" for more information about other forms of text classification and their associated models, datasets, and metrics."),this.h()},l(u){f=n(u,"P",{});var $=i(f);g=r($,"See the text classification "),d=n($,"A",{href:!0,rel:!0});var w=i(d);_=r(w,"task page"),w.forEach(a),b=r($," for more information about other forms of text classification and their associated models, datasets, and metrics."),$.forEach(a),this.h()},h(){m(d,"href","https://huggingface.co/tasks/text-classification"),m(d,"rel","nofollow")},m(u,$){p(u,f,$),t(f,g),t(f,d),t(d,_),t(f,b)},d(u){u&&a(f)}}}function rr(C){let f,g,d,_,b,u,$,w;return{c(){f=l("p"),g=o("If you aren\u2019t familiar with fine-tuning a model with the "),d=l("a"),_=o("Trainer"),b=o(", take a look at the basic tutorial "),u=l("a"),$=o("here"),w=o("!"),this.h()},l(z){f=n(z,"P",{});var v=i(f);g=r(v,"If you aren\u2019t familiar with fine-tuning a model with the "),d=n(v,"A",{href:!0});var q=i(d);_=r(q,"Trainer"),q.forEach(a),b=r(v,", take a look at the basic tutorial "),u=n(v,"A",{href:!0});var S=i(u);$=r(S,"here"),S.forEach(a),w=r(v,"!"),v.forEach(a),this.h()},h(){m(d,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(u,"href","training#finetune-with-trainer")},m(z,v){p(z,f,v),t(f,g),t(f,d),t(d,_),t(f,b),t(f,u),t(u,$),t(f,w)},d(z){z&&a(f)}}}function lr(C){let 
f,g,d,_,b,u,$;return{c(){f=l("p"),g=l("a"),d=o("Trainer"),_=o(" will apply dynamic padding by default when you pass "),b=l("code"),u=o("tokenizer"),$=o(" to it. In this case, you don\u2019t need to specify a data collator explicitly."),this.h()},l(w){f=n(w,"P",{});var z=i(f);g=n(z,"A",{href:!0});var v=i(g);d=r(v,"Trainer"),v.forEach(a),_=r(z," will apply dynamic padding by default when you pass "),b=n(z,"CODE",{});var q=i(b);u=r(q,"tokenizer"),q.forEach(a),$=r(z," to it. In this case, you don\u2019t need to specify a data collator explicitly."),z.forEach(a),this.h()},h(){m(g,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer")},m(w,z){p(w,f,z),t(f,g),t(g,d),t(f,_),t(f,b),t(b,u),t(f,$)},d(w){w&&a(f)}}}function nr(C){let f,g,d,_,b;return{c(){f=l("p"),g=o("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),d=l("a"),_=o("here"),b=o("!"),this.h()},l(u){f=n(u,"P",{});var $=i(f);g=r($,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),d=n($,"A",{href:!0});var w=i(d);_=r(w,"here"),w.forEach(a),b=r($,"!"),$.forEach(a),this.h()},h(){m(d,"href","training#finetune-with-keras")},m(u,$){p(u,f,$),t(f,g),t(f,d),t(d,_),t(f,b)},d(u){u&&a(f)}}}function ir(C){let f,g,d,_,b,u,$,w;return{c(){f=l("p"),g=o(`For a more in-depth example of how to fine-tune a model for text classification, take a look at the corresponding `),d=l("a"),_=o("PyTorch notebook"),b=o(` or `),u=l("a"),$=o("TensorFlow notebook"),w=o("."),this.h()},l(z){f=n(z,"P",{});var v=i(f);g=r(v,`For a more in-depth example of how to fine-tune a model for text classification, take a look at the corresponding `),d=n(v,"A",{href:!0,rel:!0});var q=i(d);_=r(q,"PyTorch notebook"),q.forEach(a),b=r(v,` or `),u=n(v,"A",{href:!0,rel:!0});var S=i(u);$=r(S,"TensorFlow 
notebook"),S.forEach(a),w=r(v,"."),v.forEach(a),this.h()},h(){m(d,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb"),m(d,"rel","nofollow"),m(u,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb"),m(u,"rel","nofollow")},m(z,v){p(z,f,v),t(f,g),t(f,d),t(d,_),t(f,b),t(f,u),t(u,$),t(f,w)},d(z){z&&a(f)}}}function pr(C){let f,g,d,_,b,u,$,w,z,v,q,S,Ie,Aa,qt,I,Ca,ie,Da,Pa,pe,Fa,Sa,At,U,Ct,N,H,Xe,fe,Ia,Ze,Ma,Dt,Me,Oa,Pt,he,Ft,Oe,La,St,ce,It,Le,Na,Mt,Y,Ne,et,Ba,Wa,Ra,M,tt,Ua,Ha,at,Ya,Ga,st,Ka,Va,Ot,B,G,ot,me,Ja,rt,Qa,Lt,K,Xa,lt,Za,es,Nt,de,Bt,V,ts,nt,as,ss,Wt,ue,Rt,D,os,_e,it,rs,ls,pt,ns,is,ft,ps,fs,Ut,ge,Ht,A,hs,Be,cs,ms,ht,ds,us,ct,_s,gs,mt,$s,bs,Yt,$e,Gt,W,J,dt,be,ws,ut,vs,Kt,Q,ks,We,ys,js,Vt,we,Jt,X,Qt,Re,Es,Xt,O,ve,Ts,Ue,xs,zs,qs,ke,As,He,Cs,Ds,Ps,ye,Fs,Ye,Ss,Is,Zt,je,ea,Z,ta,R,ee,_t,Ee,Ms,gt,Os,aa,Ge,Ls,sa,te,oa,P,Ns,$t,Bs,Ws,Te,bt,Rs,Us,wt,Hs,Ys,ra,xe,la,Ke,Gs,na,ze,ia,ae,Ks,Ve,Vs,Js,pa,qe,fa,se,Qs,Ae,vt,Xs,Zs,ha,Ce,ca,oe,eo,De,kt,to,ao,ma,Pe,da,re,ua;return u=new zt({}),q=new ar({props:{id:"leNG9fN9FQU"}}),U=new xt({props:{$$slots:{default:[or]},$$scope:{ctx:C}}}),fe=new zt({}),he=new F({props:{code:`from datasets import load_dataset imdb = load_dataset("imdb")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>imdb = load_dataset(<span class="hljs-string">&quot;imdb&quot;</span>)`}}),ce=new F({props:{code:'imdb["test"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>imdb[<span class="hljs-string">&quot;test&quot;</span>][<span class="hljs-number">0</span>] { <span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;text&quot;</span>: <span class="hljs-string">&quot;I love sci-fi and am willing to put up 
with a lot. Sci-fi movies/TV are usually underfunded, under-appreciated and misunderstood. I tried to like this, I really did, but it is to good TV sci-fi as Babylon 5 is to Star Trek (the original). Silly prosthetics, cheap cardboard sets, stilted dialogues, CG that doesn&#x27;t match the background, and painfully one-dimensional characters cannot be overcome with a &#x27;sci-fi&#x27; setting. (I&#x27;m sure there are those of you out there who think Babylon 5 is good sci-fi TV. It&#x27;s not. It&#x27;s clich\xE9d and uninspiring.) While US viewers might like emotion and character development, sci-fi is a genre that does not take itself seriously (cf. Star Trek). It may treat important issues, yet not as a serious philosophy. It&#x27;s really difficult to care about the characters here as they are not simply foolish, just missing a spark of life. Their actions and reactions are wooden and predictable, often painful to watch. The makers of Earth KNOW it&#x27;s rubbish as they have to always say \\&quot;Gene Roddenberry&#x27;s Earth...\\&quot; otherwise people would not continue watching. Roddenberry&#x27;s ashes must be turning in their orbit as this dull, cheap, poorly edited (watching it without advert breaks really brings this home) trudging Trabant of a show lumbers into space. Spoiler. So, kill off a main character. And then bring him back as another actor. Jeeez! 
Dallas all over again.&quot;</span>, }`}}),me=new zt({}),de=new F({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),ue=new F({props:{code:`def preprocess_function(examples): return tokenizer(examples["text"], truncation=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> tokenizer(examples[<span class="hljs-string">&quot;text&quot;</span>], truncation=<span class="hljs-literal">True</span>)`}}),ge=new F({props:{code:"tokenized_imdb = imdb.map(preprocess_function, batched=True)",highlighted:'tokenized_imdb = imdb.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)'}}),$e=new sr({props:{group1:{id:"pt",code:`from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorWithPadding(tokenizer=tokenizer)`},group2:{id:"tf",code:`from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> 
DataCollatorWithPadding <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}}),be=new zt({}),we=new F({props:{code:`from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)`}}),X=new xt({props:{$$slots:{default:[rr]},$$scope:{ctx:C}}}),je=new F({props:{code:`training_args = TrainingArguments( output_dir="./results", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=5, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_imdb["train"], eval_dataset=tokenized_imdb["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">5</span>, <span class="hljs-meta">... 
</span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_imdb[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),Z=new xt({props:{$$slots:{default:[lr]},$$scope:{ctx:C}}}),Ee=new zt({}),te=new xt({props:{$$slots:{default:[nr]},$$scope:{ctx:C}}}),xe=new F({props:{code:`tf_train_dataset = tokenized_imdb["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "label"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_validation_dataset = tokenized_imdb["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "label"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_dataset = tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_dataset = tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)`}}),ze=new F({props:{code:`from transformers import create_optimizer import tensorflow as tf batch_size = 16 num_epochs = 5 batches_per_epoch = len(tokenized_imdb["train"]) // batch_size total_train_steps = int(batches_per_epoch * num_epochs) optimizer, schedule = create_optimizer(init_lr=2e-5, num_warmup_steps=0, num_train_steps=total_train_steps)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">5</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batches_per_epoch = <span class="hljs-built_in">len</span>(tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = <span class="hljs-built_in">int</span>(batches_per_epoch * num_epochs) <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer(init_lr=<span class="hljs-number">2e-5</span>, num_warmup_steps=<span class="hljs-number">0</span>, 
num_train_steps=total_train_steps)`}}),qe=new F({props:{code:`from transformers import TFAutoModelForSequenceClassification model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)`}}),Ce=new F({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),Pe=new F({props:{code:"model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)'}}),re=new xt({props:{$$slots:{default:[ir]},$$scope:{ctx:C}}}),{c(){f=l("meta"),g=h(),d=l("h1"),_=l("a"),b=l("span"),k(u.$$.fragment),$=h(),w=l("span"),z=o("Text classification"),v=h(),k(q.$$.fragment),S=h(),Ie=l("p"),Aa=o("Text classification is a common NLP task that assigns a label or class to text. There are many practical applications of text classification widely used in production by some of today\u2019s largest companies. 
One of the most popular forms of text classification is sentiment analysis, which assigns a label like positive, negative, or neutral to a sequence of text."),qt=h(),I=l("p"),Ca=o("This guide will show you how to fine-tune "),ie=l("a"),Da=o("DistilBERT"),Pa=o(" on the "),pe=l("a"),Fa=o("IMDb"),Sa=o(" dataset to determine whether a movie review is positive or negative."),At=h(),k(U.$$.fragment),Ct=h(),N=l("h2"),H=l("a"),Xe=l("span"),k(fe.$$.fragment),Ia=h(),Ze=l("span"),Ma=o("Load IMDb dataset"),Dt=h(),Me=l("p"),Oa=o("Load the IMDb dataset from the \u{1F917} Datasets library:"),Pt=h(),k(he.$$.fragment),Ft=h(),Oe=l("p"),La=o("Then take a look at an example:"),St=h(),k(ce.$$.fragment),It=h(),Le=l("p"),Na=o("There are two fields in this dataset:"),Mt=h(),Y=l("ul"),Ne=l("li"),et=l("code"),Ba=o("text"),Wa=o(": a string containing the text of the movie review."),Ra=h(),M=l("li"),tt=l("code"),Ua=o("label"),Ha=o(": a value that can either be "),at=l("code"),Ya=o("0"),Ga=o(" for a negative review or "),st=l("code"),Ka=o("1"),Va=o(" for a positive review."),Ot=h(),B=l("h2"),G=l("a"),ot=l("span"),k(me.$$.fragment),Ja=h(),rt=l("span"),Qa=o("Preprocess"),Lt=h(),K=l("p"),Xa=o("Load the DistilBERT tokenizer to process the "),lt=l("code"),Za=o("text"),es=o(" field:"),Nt=h(),k(de.$$.fragment),Bt=h(),V=l("p"),ts=o("Create a preprocessing function to tokenize "),nt=l("code"),as=o("text"),ss=o(" and truncate sequences to be no longer than DistilBERT\u2019s maximum input length:"),Wt=h(),k(ue.$$.fragment),Rt=h(),D=l("p"),os=o("Use \u{1F917} Datasets "),_e=l("a"),it=l("code"),rs=o("map"),ls=o(" function to apply the preprocessing function over the entire dataset. You can speed up the "),pt=l("code"),ns=o("map"),is=o(" function by setting "),ft=l("code"),ps=o("batched=True"),fs=o(" to process multiple elements of the dataset at once:"),Ut=h(),k(ge.$$.fragment),Ht=h(),A=l("p"),hs=o("Use "),Be=l("a"),cs=o("DataCollatorWithPadding"),ms=o(" to create a batch of examples. 
It will also "),ht=l("em"),ds=o("dynamically pad"),us=o(" your text to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),ct=l("code"),_s=o("tokenizer"),gs=o(" function by setting "),mt=l("code"),$s=o("padding=True"),bs=o(", dynamic padding is more efficient."),Yt=h(),k($e.$$.fragment),Gt=h(),W=l("h2"),J=l("a"),dt=l("span"),k(be.$$.fragment),ws=h(),ut=l("span"),vs=o("Fine-tune with Trainer"),Kt=h(),Q=l("p"),ks=o("Load DistilBERT with "),We=l("a"),ys=o("AutoModelForSequenceClassification"),js=o(" along with the number of expected labels:"),Vt=h(),k(we.$$.fragment),Jt=h(),k(X.$$.fragment),Qt=h(),Re=l("p"),Es=o("At this point, only three steps remain:"),Xt=h(),O=l("ol"),ve=l("li"),Ts=o("Define your training hyperparameters in "),Ue=l("a"),xs=o("TrainingArguments"),zs=o("."),qs=h(),ke=l("li"),As=o("Pass the training arguments to "),He=l("a"),Cs=o("Trainer"),Ds=o(" along with the model, dataset, tokenizer, and data collator."),Ps=h(),ye=l("li"),Fs=o("Call "),Ye=l("a"),Ss=o("train()"),Is=o(" to fine-tune your model."),Zt=h(),k(je.$$.fragment),ea=h(),k(Z.$$.fragment),ta=h(),R=l("h2"),ee=l("a"),_t=l("span"),k(Ee.$$.fragment),Ms=h(),gt=l("span"),Os=o("Fine-tune with TensorFlow"),aa=h(),Ge=l("p"),Ls=o("To fine-tune a model in TensorFlow is just as easy, with only a few differences."),sa=h(),k(te.$$.fragment),oa=h(),P=l("p"),Ns=o("Convert your datasets to the "),$t=l("code"),Bs=o("tf.data.Dataset"),Ws=o(" format with "),Te=l("a"),bt=l("code"),Rs=o("to_tf_dataset"),Us=o(". 
Specify inputs and labels in "),wt=l("code"),Hs=o("columns"),Ys=o(", whether to shuffle the dataset order, batch size, and the data collator:"),ra=h(),k(xe.$$.fragment),la=h(),Ke=l("p"),Gs=o("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),na=h(),k(ze.$$.fragment),ia=h(),ae=l("p"),Ks=o("Load DistilBERT with "),Ve=l("a"),Vs=o("TFAutoModelForSequenceClassification"),Js=o(" along with the number of expected labels:"),pa=h(),k(qe.$$.fragment),fa=h(),se=l("p"),Qs=o("Configure the model for training with "),Ae=l("a"),vt=l("code"),Xs=o("compile"),Zs=o(":"),ha=h(),k(Ce.$$.fragment),ca=h(),oe=l("p"),eo=o("Call "),De=l("a"),kt=l("code"),to=o("fit"),ao=o(" to fine-tune the model:"),ma=h(),k(Pe.$$.fragment),da=h(),k(re.$$.fragment),this.h()},l(e){const s=tr('[data-svelte="svelte-1phssyn"]',document.head);f=n(s,"META",{name:!0,content:!0}),s.forEach(a),g=c(e),d=n(e,"H1",{class:!0});var Fe=i(d);_=n(Fe,"A",{id:!0,class:!0,href:!0});var yt=i(_);b=n(yt,"SPAN",{});var jt=i(b);y(u.$$.fragment,jt),jt.forEach(a),yt.forEach(a),$=c(Fe),w=n(Fe,"SPAN",{});var Et=i(w);z=r(Et,"Text classification"),Et.forEach(a),Fe.forEach(a),v=c(e),y(q.$$.fragment,e),S=c(e),Ie=n(e,"P",{});var Tt=i(Ie);Aa=r(Tt,"Text classification is a common NLP task that assigns a label or class to text. There are many practical applications of text classification widely used in production by some of today\u2019s largest companies. 
One of the most popular forms of text classification is sentiment analysis, which assigns a label like positive, negative, or neutral to a sequence of text."),Tt.forEach(a),qt=c(e),I=n(e,"P",{});var Je=i(I);Ca=r(Je,"This guide will show you how to fine-tune "),ie=n(Je,"A",{href:!0,rel:!0});var oo=i(ie);Da=r(oo,"DistilBERT"),oo.forEach(a),Pa=r(Je," on the "),pe=n(Je,"A",{href:!0,rel:!0});var ro=i(pe);Fa=r(ro,"IMDb"),ro.forEach(a),Sa=r(Je," dataset to determine whether a movie review is positive or negative."),Je.forEach(a),At=c(e),y(U.$$.fragment,e),Ct=c(e),N=n(e,"H2",{class:!0});var _a=i(N);H=n(_a,"A",{id:!0,class:!0,href:!0});var lo=i(H);Xe=n(lo,"SPAN",{});var no=i(Xe);y(fe.$$.fragment,no),no.forEach(a),lo.forEach(a),Ia=c(_a),Ze=n(_a,"SPAN",{});var io=i(Ze);Ma=r(io,"Load IMDb dataset"),io.forEach(a),_a.forEach(a),Dt=c(e),Me=n(e,"P",{});var po=i(Me);Oa=r(po,"Load the IMDb dataset from the \u{1F917} Datasets library:"),po.forEach(a),Pt=c(e),y(he.$$.fragment,e),Ft=c(e),Oe=n(e,"P",{});var fo=i(Oe);La=r(fo,"Then take a look at an example:"),fo.forEach(a),St=c(e),y(ce.$$.fragment,e),It=c(e),Le=n(e,"P",{});var ho=i(Le);Na=r(ho,"There are two fields in this dataset:"),ho.forEach(a),Mt=c(e),Y=n(e,"UL",{});var ga=i(Y);Ne=n(ga,"LI",{});var so=i(Ne);et=n(so,"CODE",{});var co=i(et);Ba=r(co,"text"),co.forEach(a),Wa=r(so,": a string containing the text of the movie review."),so.forEach(a),Ra=c(ga),M=n(ga,"LI",{});var Se=i(M);tt=n(Se,"CODE",{});var mo=i(tt);Ua=r(mo,"label"),mo.forEach(a),Ha=r(Se,": a value that can either be "),at=n(Se,"CODE",{});var uo=i(at);Ya=r(uo,"0"),uo.forEach(a),Ga=r(Se," for a negative review or "),st=n(Se,"CODE",{});var _o=i(st);Ka=r(_o,"1"),_o.forEach(a),Va=r(Se," for a positive review."),Se.forEach(a),ga.forEach(a),Ot=c(e),B=n(e,"H2",{class:!0});var $a=i(B);G=n($a,"A",{id:!0,class:!0,href:!0});var go=i(G);ot=n(go,"SPAN",{});var $o=i(ot);y(me.$$.fragment,$o),$o.forEach(a),go.forEach(a),Ja=c($a),rt=n($a,"SPAN",{});var 
bo=i(rt);Qa=r(bo,"Preprocess"),bo.forEach(a),$a.forEach(a),Lt=c(e),K=n(e,"P",{});var ba=i(K);Xa=r(ba,"Load the DistilBERT tokenizer to process the "),lt=n(ba,"CODE",{});var wo=i(lt);Za=r(wo,"text"),wo.forEach(a),es=r(ba," field:"),ba.forEach(a),Nt=c(e),y(de.$$.fragment,e),Bt=c(e),V=n(e,"P",{});var wa=i(V);ts=r(wa,"Create a preprocessing function to tokenize "),nt=n(wa,"CODE",{});var vo=i(nt);as=r(vo,"text"),vo.forEach(a),ss=r(wa," and truncate sequences to be no longer than DistilBERT\u2019s maximum input length:"),wa.forEach(a),Wt=c(e),y(ue.$$.fragment,e),Rt=c(e),D=n(e,"P",{});var le=i(D);os=r(le,"Use \u{1F917} Datasets "),_e=n(le,"A",{href:!0,rel:!0});var ko=i(_e);it=n(ko,"CODE",{});var yo=i(it);rs=r(yo,"map"),yo.forEach(a),ko.forEach(a),ls=r(le," function to apply the preprocessing function over the entire dataset. You can speed up the "),pt=n(le,"CODE",{});var jo=i(pt);ns=r(jo,"map"),jo.forEach(a),is=r(le," function by setting "),ft=n(le,"CODE",{});var Eo=i(ft);ps=r(Eo,"batched=True"),Eo.forEach(a),fs=r(le," to process multiple elements of the dataset at once:"),le.forEach(a),Ut=c(e),y(ge.$$.fragment,e),Ht=c(e),A=n(e,"P",{});var L=i(A);hs=r(L,"Use "),Be=n(L,"A",{href:!0});var To=i(Be);cs=r(To,"DataCollatorWithPadding"),To.forEach(a),ms=r(L," to create a batch of examples. It will also "),ht=n(L,"EM",{});var xo=i(ht);ds=r(xo,"dynamically pad"),xo.forEach(a),us=r(L," your text to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the "),ct=n(L,"CODE",{});var zo=i(ct);_s=r(zo,"tokenizer"),zo.forEach(a),gs=r(L," function by setting "),mt=n(L,"CODE",{});var qo=i(mt);$s=r(qo,"padding=True"),qo.forEach(a),bs=r(L,", dynamic padding is more efficient."),L.forEach(a),Yt=c(e),y($e.$$.fragment,e),Gt=c(e),W=n(e,"H2",{class:!0});var va=i(W);J=n(va,"A",{id:!0,class:!0,href:!0});var Ao=i(J);dt=n(Ao,"SPAN",{});var Co=i(dt);y(be.$$.fragment,Co),Co.forEach(a),Ao.forEach(a),ws=c(va),ut=n(va,"SPAN",{});var Do=i(ut);vs=r(Do,"Fine-tune with Trainer"),Do.forEach(a),va.forEach(a),Kt=c(e),Q=n(e,"P",{});var ka=i(Q);ks=r(ka,"Load DistilBERT with "),We=n(ka,"A",{href:!0});var Po=i(We);ys=r(Po,"AutoModelForSequenceClassification"),Po.forEach(a),js=r(ka," along with the number of expected labels:"),ka.forEach(a),Vt=c(e),y(we.$$.fragment,e),Jt=c(e),y(X.$$.fragment,e),Qt=c(e),Re=n(e,"P",{});var Fo=i(Re);Es=r(Fo,"At this point, only three steps remain:"),Fo.forEach(a),Xt=c(e),O=n(e,"OL",{});var Qe=i(O);ve=n(Qe,"LI",{});var ya=i(ve);Ts=r(ya,"Define your training hyperparameters in "),Ue=n(ya,"A",{href:!0});var So=i(Ue);xs=r(So,"TrainingArguments"),So.forEach(a),zs=r(ya,"."),ya.forEach(a),qs=c(Qe),ke=n(Qe,"LI",{});var ja=i(ke);As=r(ja,"Pass the training arguments to "),He=n(ja,"A",{href:!0});var Io=i(He);Cs=r(Io,"Trainer"),Io.forEach(a),Ds=r(ja," along with the model, dataset, tokenizer, and data collator."),ja.forEach(a),Ps=c(Qe),ye=n(Qe,"LI",{});var Ea=i(ye);Fs=r(Ea,"Call "),Ye=n(Ea,"A",{href:!0});var Mo=i(Ye);Ss=r(Mo,"train()"),Mo.forEach(a),Is=r(Ea," to fine-tune your model."),Ea.forEach(a),Qe.forEach(a),Zt=c(e),y(je.$$.fragment,e),ea=c(e),y(Z.$$.fragment,e),ta=c(e),R=n(e,"H2",{class:!0});var Ta=i(R);ee=n(Ta,"A",{id:!0,class:!0,href:!0});var Oo=i(ee);_t=n(Oo,"SPAN",{});var Lo=i(_t);y(Ee.$$.fragment,Lo),Lo.forEach(a),Oo.forEach(a),Ms=c(Ta),gt=n(Ta,"SPAN",{});var No=i(gt);Os=r(No,"Fine-tune with TensorFlow"),No.forEach(a),Ta.forEach(a),aa=c(e),Ge=n(e,"P",{});var 
Bo=i(Ge);Ls=r(Bo,"To fine-tune a model in TensorFlow is just as easy, with only a few differences."),Bo.forEach(a),sa=c(e),y(te.$$.fragment,e),oa=c(e),P=n(e,"P",{});var ne=i(P);Ns=r(ne,"Convert your datasets to the "),$t=n(ne,"CODE",{});var Wo=i($t);Bs=r(Wo,"tf.data.Dataset"),Wo.forEach(a),Ws=r(ne," format with "),Te=n(ne,"A",{href:!0,rel:!0});var Ro=i(Te);bt=n(Ro,"CODE",{});var Uo=i(bt);Rs=r(Uo,"to_tf_dataset"),Uo.forEach(a),Ro.forEach(a),Us=r(ne,". Specify inputs and labels in "),wt=n(ne,"CODE",{});var Ho=i(wt);Hs=r(Ho,"columns"),Ho.forEach(a),Ys=r(ne,", whether to shuffle the dataset order, batch size, and the data collator:"),ne.forEach(a),ra=c(e),y(xe.$$.fragment,e),la=c(e),Ke=n(e,"P",{});var Yo=i(Ke);Gs=r(Yo,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),Yo.forEach(a),na=c(e),y(ze.$$.fragment,e),ia=c(e),ae=n(e,"P",{});var xa=i(ae);Ks=r(xa,"Load DistilBERT with "),Ve=n(xa,"A",{href:!0});var Go=i(Ve);Vs=r(Go,"TFAutoModelForSequenceClassification"),Go.forEach(a),Js=r(xa," along with the number of expected labels:"),xa.forEach(a),pa=c(e),y(qe.$$.fragment,e),fa=c(e),se=n(e,"P",{});var za=i(se);Qs=r(za,"Configure the model for training with "),Ae=n(za,"A",{href:!0,rel:!0});var Ko=i(Ae);vt=n(Ko,"CODE",{});var Vo=i(vt);Xs=r(Vo,"compile"),Vo.forEach(a),Ko.forEach(a),Zs=r(za,":"),za.forEach(a),ha=c(e),y(Ce.$$.fragment,e),ca=c(e),oe=n(e,"P",{});var qa=i(oe);eo=r(qa,"Call "),De=n(qa,"A",{href:!0,rel:!0});var Jo=i(De);kt=n(Jo,"CODE",{});var Qo=i(kt);to=r(Qo,"fit"),Qo.forEach(a),Jo.forEach(a),ao=r(qa," to fine-tune the model:"),qa.forEach(a),ma=c(e),y(Pe.$$.fragment,e),da=c(e),y(re.$$.fragment,e),this.h()},h(){m(f,"name","hf:doc:metadata"),m(f,"content",JSON.stringify(fr)),m(_,"id","text-classification"),m(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),m(_,"href","#text-classification"),m(d,"class","relative group"),m(ie,"href","https://huggingface.co/distilbert-base-uncased"),m(ie,"rel","nofollow"),m(pe,"href","https://huggingface.co/datasets/imdb"),m(pe,"rel","nofollow"),m(H,"id","load-imdb-dataset"),m(H,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(H,"href","#load-imdb-dataset"),m(N,"class","relative group"),m(G,"id","preprocess"),m(G,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(G,"href","#preprocess"),m(B,"class","relative group"),m(_e,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map"),m(_e,"rel","nofollow"),m(Be,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorWithPadding"),m(J,"id","finetune-with-trainer"),m(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(J,"href","#finetune-with-trainer"),m(W,"class","relative group"),m(We,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSequenceClassification"),m(Ue,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),m(He,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(Ye,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train"),m(ee,"id","finetune-with-tensorflow"),m(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ee,"href","#finetune-with-tensorflow"),m(R,"class","relative 
group"),m(Te,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),m(Te,"rel","nofollow"),m(Ve,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForSequenceClassification"),m(Ae,"href","https://keras.io/api/models/model_training_apis/#compile-method"),m(Ae,"rel","nofollow"),m(De,"href","https://keras.io/api/models/model_training_apis/#fit-method"),m(De,"rel","nofollow")},m(e,s){t(document.head,f),p(e,g,s),p(e,d,s),t(d,_),t(_,b),j(u,b,null),t(d,$),t(d,w),t(w,z),p(e,v,s),j(q,e,s),p(e,S,s),p(e,Ie,s),t(Ie,Aa),p(e,qt,s),p(e,I,s),t(I,Ca),t(I,ie),t(ie,Da),t(I,Pa),t(I,pe),t(pe,Fa),t(I,Sa),p(e,At,s),j(U,e,s),p(e,Ct,s),p(e,N,s),t(N,H),t(H,Xe),j(fe,Xe,null),t(N,Ia),t(N,Ze),t(Ze,Ma),p(e,Dt,s),p(e,Me,s),t(Me,Oa),p(e,Pt,s),j(he,e,s),p(e,Ft,s),p(e,Oe,s),t(Oe,La),p(e,St,s),j(ce,e,s),p(e,It,s),p(e,Le,s),t(Le,Na),p(e,Mt,s),p(e,Y,s),t(Y,Ne),t(Ne,et),t(et,Ba),t(Ne,Wa),t(Y,Ra),t(Y,M),t(M,tt),t(tt,Ua),t(M,Ha),t(M,at),t(at,Ya),t(M,Ga),t(M,st),t(st,Ka),t(M,Va),p(e,Ot,s),p(e,B,s),t(B,G),t(G,ot),j(me,ot,null),t(B,Ja),t(B,rt),t(rt,Qa),p(e,Lt,s),p(e,K,s),t(K,Xa),t(K,lt),t(lt,Za),t(K,es),p(e,Nt,s),j(de,e,s),p(e,Bt,s),p(e,V,s),t(V,ts),t(V,nt),t(nt,as),t(V,ss),p(e,Wt,s),j(ue,e,s),p(e,Rt,s),p(e,D,s),t(D,os),t(D,_e),t(_e,it),t(it,rs),t(D,ls),t(D,pt),t(pt,ns),t(D,is),t(D,ft),t(ft,ps),t(D,fs),p(e,Ut,s),j(ge,e,s),p(e,Ht,s),p(e,A,s),t(A,hs),t(A,Be),t(Be,cs),t(A,ms),t(A,ht),t(ht,ds),t(A,us),t(A,ct),t(ct,_s),t(A,gs),t(A,mt),t(mt,$s),t(A,bs),p(e,Yt,s),j($e,e,s),p(e,Gt,s),p(e,W,s),t(W,J),t(J,dt),j(be,dt,null),t(W,ws),t(W,ut),t(ut,vs),p(e,Kt,s),p(e,Q,s),t(Q,ks),t(Q,We),t(We,ys),t(Q,js),p(e,Vt,s),j(we,e,s),p(e,Jt,s),j(X,e,s),p(e,Qt,s),p(e,Re,s),t(Re,Es),p(e,Xt,s),p(e,O,s),t(O,ve),t(ve,Ts),t(ve,Ue),t(Ue,xs),t(ve,zs),t(O,qs),t(O,ke),t(ke,As),t(ke,He),t(He,Cs),t(ke,Ds),t(O,Ps),t(O,ye),t(ye,Fs),t(ye,Ye),t(Ye,Ss),t(ye,Is),p(e,Zt,s),j(je,e,s),p(e,ea,s),j(Z,e,s),p(e,ta,s),p(e,R,s),t(R,ee),t(ee,_t),j(Ee,_t,null),t(R,Ms),t(R,gt),t
(gt,Os),p(e,aa,s),p(e,Ge,s),t(Ge,Ls),p(e,sa,s),j(te,e,s),p(e,oa,s),p(e,P,s),t(P,Ns),t(P,$t),t($t,Bs),t(P,Ws),t(P,Te),t(Te,bt),t(bt,Rs),t(P,Us),t(P,wt),t(wt,Hs),t(P,Ys),p(e,ra,s),j(xe,e,s),p(e,la,s),p(e,Ke,s),t(Ke,Gs),p(e,na,s),j(ze,e,s),p(e,ia,s),p(e,ae,s),t(ae,Ks),t(ae,Ve),t(Ve,Vs),t(ae,Js),p(e,pa,s),j(qe,e,s),p(e,fa,s),p(e,se,s),t(se,Qs),t(se,Ae),t(Ae,vt),t(vt,Xs),t(se,Zs),p(e,ha,s),j(Ce,e,s),p(e,ca,s),p(e,oe,s),t(oe,eo),t(oe,De),t(De,kt),t(kt,to),t(oe,ao),p(e,ma,s),j(Pe,e,s),p(e,da,s),j(re,e,s),ua=!0},p(e,[s]){const Fe={};s&2&&(Fe.$$scope={dirty:s,ctx:e}),U.$set(Fe);const yt={};s&2&&(yt.$$scope={dirty:s,ctx:e}),X.$set(yt);const jt={};s&2&&(jt.$$scope={dirty:s,ctx:e}),Z.$set(jt);const Et={};s&2&&(Et.$$scope={dirty:s,ctx:e}),te.$set(Et);const Tt={};s&2&&(Tt.$$scope={dirty:s,ctx:e}),re.$set(Tt)},i(e){ua||(E(u.$$.fragment,e),E(q.$$.fragment,e),E(U.$$.fragment,e),E(fe.$$.fragment,e),E(he.$$.fragment,e),E(ce.$$.fragment,e),E(me.$$.fragment,e),E(de.$$.fragment,e),E(ue.$$.fragment,e),E(ge.$$.fragment,e),E($e.$$.fragment,e),E(be.$$.fragment,e),E(we.$$.fragment,e),E(X.$$.fragment,e),E(je.$$.fragment,e),E(Z.$$.fragment,e),E(Ee.$$.fragment,e),E(te.$$.fragment,e),E(xe.$$.fragment,e),E(ze.$$.fragment,e),E(qe.$$.fragment,e),E(Ce.$$.fragment,e),E(Pe.$$.fragment,e),E(re.$$.fragment,e),ua=!0)},o(e){T(u.$$.fragment,e),T(q.$$.fragment,e),T(U.$$.fragment,e),T(fe.$$.fragment,e),T(he.$$.fragment,e),T(ce.$$.fragment,e),T(me.$$.fragment,e),T(de.$$.fragment,e),T(ue.$$.fragment,e),T(ge.$$.fragment,e),T($e.$$.fragment,e),T(be.$$.fragment,e),T(we.$$.fragment,e),T(X.$$.fragment,e),T(je.$$.fragment,e),T(Z.$$.fragment,e),T(Ee.$$.fragment,e),T(te.$$.fragment,e),T(xe.$$.fragment,e),T(ze.$$.fragment,e),T(qe.$$.fragment,e),T(Ce.$$.fragment,e),T(Pe.$$.fragment,e),T(re.$$.fragment,e),ua=!1},d(e){a(f),e&&a(g),e&&a(d),x(u),e&&a(v),x(q,e),e&&a(S),e&&a(Ie),e&&a(qt),e&&a(I),e&&a(At),x(U,e),e&&a(Ct),e&&a(N),x(fe),e&&a(Dt),e&&a(Me),e&&a(Pt),x(he,e),e&&a(Ft),e&&a(Oe),e&&a(St),x(ce,e),e&&a(It),e&&a(Le),e&&a(M
t),e&&a(Y),e&&a(Ot),e&&a(B),x(me),e&&a(Lt),e&&a(K),e&&a(Nt),x(de,e),e&&a(Bt),e&&a(V),e&&a(Wt),x(ue,e),e&&a(Rt),e&&a(D),e&&a(Ut),x(ge,e),e&&a(Ht),e&&a(A),e&&a(Yt),x($e,e),e&&a(Gt),e&&a(W),x(be),e&&a(Kt),e&&a(Q),e&&a(Vt),x(we,e),e&&a(Jt),x(X,e),e&&a(Qt),e&&a(Re),e&&a(Xt),e&&a(O),e&&a(Zt),x(je,e),e&&a(ea),x(Z,e),e&&a(ta),e&&a(R),x(Ee),e&&a(aa),e&&a(Ge),e&&a(sa),x(te,e),e&&a(oa),e&&a(P),e&&a(ra),x(xe,e),e&&a(la),e&&a(Ke),e&&a(na),x(ze,e),e&&a(ia),e&&a(ae),e&&a(pa),x(qe,e),e&&a(fa),e&&a(se),e&&a(ha),x(Ce,e),e&&a(ca),e&&a(oe),e&&a(ma),x(Pe,e),e&&a(da),x(re,e)}}}const fr={local:"text-classification",sections:[{local:"load-imdb-dataset",title:"Load IMDb dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-trainer",title:"Fine-tune with Trainer"},{local:"finetune-with-tensorflow",title:"Fine-tune with TensorFlow"}],title:"Text classification"};function hr(C,f,g){let{fw:d}=f;return C.$$set=_=>{"fw"in _&&g(0,d=_.fw)},[d]}class br extends Xo{constructor(f){super();Zo(this,f,hr,pr,er,{fw:0})}}export{br as default,fr as metadata};
413
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/tasks/language_modeling.mdx-e8a233ab.js
import{S as nf,i as rf,s as of,e as r,k as f,w as g,t as l,M as pf,c as o,d as t,m as h,a as i,x as _,h as n,b as m,F as s,g as p,y as $,q as w,o as k,B as j}from"../../chunks/vendor-4833417e.js";import{T as Es}from"../../chunks/Tip-fffd6df1.js";import{Y as En}from"../../chunks/Youtube-27813aed.js";import{I as fe}from"../../chunks/IconCopyLink-4b81c553.js";import{C as q}from"../../chunks/CodeBlock-6a3d1b46.js";import{C as lf}from"../../chunks/CodeBlockFw-27a176a0.js";import"../../chunks/CopyButton-dacfbfaf.js";function ff(P){let u,E,c,v,x,d,b,A,T,y,F,C,L,W,N,U,Ge,R,Y,he;return{c(){u=r("p"),E=l("You can fine-tune other architectures for language modeling such as "),c=r("a"),v=l("GPT-Neo"),x=l(", "),d=r("a"),b=l("GPT-J"),A=l(", and "),T=r("a"),y=l("BERT"),F=l(", following the same steps presented in this guide!"),C=f(),L=r("p"),W=l("See the text generation "),N=r("a"),U=l("task page"),Ge=l(" and fill mask "),R=r("a"),Y=l("task page"),he=l(" for more information about their associated models, datasets, and metrics."),this.h()},l(M){u=o(M,"P",{});var z=i(u);E=n(z,"You can fine-tune other architectures for language modeling such as "),c=o(z,"A",{href:!0,rel:!0});var He=i(c);v=n(He,"GPT-Neo"),He.forEach(t),x=n(z,", "),d=o(z,"A",{href:!0,rel:!0});var D=i(d);b=n(D,"GPT-J"),D.forEach(t),A=n(z,", and "),T=o(z,"A",{href:!0,rel:!0});var Jt=i(T);y=n(Jt,"BERT"),Jt.forEach(t),F=n(z,", following the same steps presented in this guide!"),z.forEach(t),C=h(M),L=o(M,"P",{});var I=i(L);W=n(I,"See the text generation "),N=o(I,"A",{href:!0,rel:!0});var Kt=i(N);U=n(Kt,"task page"),Kt.forEach(t),Ge=n(I," and fill mask "),R=o(I,"A",{href:!0,rel:!0});var Qt=i(R);Y=n(Qt,"task page"),Qt.forEach(t),he=n(I," for more information about their associated models, datasets, and 
metrics."),I.forEach(t),this.h()},h(){m(c,"href","https://huggingface.co/EleutherAI/gpt-neo-125M"),m(c,"rel","nofollow"),m(d,"href","https://huggingface.co/EleutherAI/gpt-j-6B"),m(d,"rel","nofollow"),m(T,"href","https://huggingface.co/bert-base-uncased"),m(T,"rel","nofollow"),m(N,"href","https://huggingface.co/tasks/text-generation"),m(N,"rel","nofollow"),m(R,"href","https://huggingface.co/tasks/fill-mask"),m(R,"rel","nofollow")},m(M,z){p(M,u,z),s(u,E),s(u,c),s(c,v),s(u,x),s(u,d),s(d,b),s(u,A),s(u,T),s(T,y),s(u,F),p(M,C,z),p(M,L,z),s(L,W),s(L,N),s(N,U),s(L,Ge),s(L,R),s(R,Y),s(L,he)},d(M){M&&t(u),M&&t(C),M&&t(L)}}}function hf(P){let u,E,c,v,x,d,b,A;return{c(){u=r("p"),E=l("If you aren\u2019t familiar with fine-tuning a model with the "),c=r("a"),v=l("Trainer"),x=l(", take a look at the basic tutorial "),d=r("a"),b=l("here"),A=l("!"),this.h()},l(T){u=o(T,"P",{});var y=i(u);E=n(y,"If you aren\u2019t familiar with fine-tuning a model with the "),c=o(y,"A",{href:!0});var F=i(c);v=n(F,"Trainer"),F.forEach(t),x=n(y,", take a look at the basic tutorial "),d=o(y,"A",{href:!0});var C=i(d);b=n(C,"here"),C.forEach(t),A=n(y,"!"),y.forEach(t),this.h()},h(){m(c,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(d,"href","training#finetune-with-trainer")},m(T,y){p(T,u,y),s(u,E),s(u,c),s(c,v),s(u,x),s(u,d),s(d,b),s(u,A)},d(T){T&&t(u)}}}function mf(P){let u,E,c,v,x;return{c(){u=r("p"),E=l("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),c=r("a"),v=l("here"),x=l("!"),this.h()},l(d){u=o(d,"P",{});var b=i(u);E=n(b,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),c=o(b,"A",{href:!0});var A=i(c);v=n(A,"here"),A.forEach(t),x=n(b,"!"),b.forEach(t),this.h()},h(){m(c,"href","training#finetune-with-keras")},m(d,b){p(d,u,b),s(u,E),s(u,c),s(c,v),s(u,x)},d(d){d&&t(u)}}}function uf(P){let u,E,c,v,x,d,b,A;return{c(){u=r("p"),E=l("If you aren\u2019t familiar 
with fine-tuning a model with the "),c=r("a"),v=l("Trainer"),x=l(", take a look at the basic tutorial "),d=r("a"),b=l("here"),A=l("!"),this.h()},l(T){u=o(T,"P",{});var y=i(u);E=n(y,"If you aren\u2019t familiar with fine-tuning a model with the "),c=o(y,"A",{href:!0});var F=i(c);v=n(F,"Trainer"),F.forEach(t),x=n(y,", take a look at the basic tutorial "),d=o(y,"A",{href:!0});var C=i(d);b=n(C,"here"),C.forEach(t),A=n(y,"!"),y.forEach(t),this.h()},h(){m(c,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(d,"href","training#finetune-with-trainer")},m(T,y){p(T,u,y),s(u,E),s(u,c),s(c,v),s(u,x),s(u,d),s(d,b),s(u,A)},d(T){T&&t(u)}}}function cf(P){let u,E,c,v,x;return{c(){u=r("p"),E=l("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),c=r("a"),v=l("here"),x=l("!"),this.h()},l(d){u=o(d,"P",{});var b=i(u);E=n(b,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),c=o(b,"A",{href:!0});var A=i(c);v=n(A,"here"),A.forEach(t),x=n(b,"!"),b.forEach(t),this.h()},h(){m(c,"href","training#finetune-with-keras")},m(d,b){p(d,u,b),s(u,E),s(u,c),s(c,v),s(u,x)},d(d){d&&t(u)}}}function df(P){let u,E,c,v,x,d,b,A;return{c(){u=r("p"),E=l(`For a more in-depth example of how to fine-tune a model for causal language modeling, take a look at the corresponding `),c=r("a"),v=l("PyTorch notebook"),x=l(` or `),d=r("a"),b=l("TensorFlow notebook"),A=l("."),this.h()},l(T){u=o(T,"P",{});var y=i(u);E=n(y,`For a more in-depth example of how to fine-tune a model for causal language modeling, take a look at the corresponding `),c=o(y,"A",{href:!0,rel:!0});var F=i(c);v=n(F,"PyTorch notebook"),F.forEach(t),x=n(y,` or `),d=o(y,"A",{href:!0,rel:!0});var C=i(d);b=n(C,"TensorFlow 
notebook"),C.forEach(t),A=n(y,"."),y.forEach(t),this.h()},h(){m(c,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling.ipynb"),m(c,"rel","nofollow"),m(d,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling-tf.ipynb"),m(d,"rel","nofollow")},m(T,y){p(T,u,y),s(u,E),s(u,c),s(c,v),s(u,x),s(u,d),s(d,b),s(u,A)},d(T){T&&t(u)}}}function gf(P){let u,E,c,v,x,d,b,A,T,y,F,C,L,W,N,U,Ge,R,Y,he,M,z,He,D,Jt,I,Kt,Qt,We,xn,Tn,Ye,An,qn,Ue,Fn,Mn,ba,me,Ea,se,ue,xs,Je,Cn,Ts,Dn,xa,Vt,zn,Ta,Ke,Aa,Xt,Pn,qa,Qe,Fa,Zt,Ln,Ma,Ve,Ca,B,In,As,Sn,On,qs,Nn,Rn,Fs,Bn,Gn,Da,ae,ce,Ms,Xe,Hn,Cs,Wn,za,Ze,Pa,de,Yn,Ds,Un,Jn,La,et,Ia,tt,Sa,es,Kn,Oa,st,Na,J,Qn,zs,Vn,Xn,at,Ps,Zn,er,Ra,lt,Ba,K,tr,Ls,sr,ar,Is,lr,nr,Ga,ts,rr,Ha,nt,Wa,S,or,rt,Ss,ir,pr,Os,fr,hr,Ns,mr,ur,Rs,cr,dr,Ya,ot,Ua,ss,gr,Ja,ge,Bs,_r,$r,it,wr,Gs,kr,jr,Ka,pt,Qa,_e,vr,Hs,yr,br,Va,ft,Xa,O,Er,as,xr,Tr,Ws,Ar,qr,Ys,Fr,Mr,Us,Cr,Dr,Za,$e,zr,Js,Pr,Lr,el,ht,tl,Q,Ir,ls,Sr,Or,Ks,Nr,Rr,sl,mt,al,le,we,Qs,ut,Br,Vs,Gr,ll,ke,Hr,ct,Wr,Yr,nl,ne,je,Xs,dt,Ur,Zs,Jr,rl,ve,Kr,ns,Qr,Vr,ol,gt,il,ye,pl,rs,Xr,fl,V,_t,Zr,os,eo,to,so,$t,ao,is,lo,no,ro,wt,oo,ps,io,po,hl,kt,ml,re,be,ea,jt,fo,ta,ho,ul,fs,mo,cl,Ee,dl,G,uo,sa,co,go,vt,aa,_o,$o,la,wo,ko,gl,yt,_l,hs,jo,$l,bt,wl,xe,vo,ms,yo,bo,kl,Et,jl,Te,Eo,xt,na,xo,To,vl,Tt,yl,Ae,Ao,At,ra,qo,Fo,bl,qt,El,oe,qe,oa,Ft,Mo,ia,Co,xl,Fe,Do,Mt,zo,Po,Tl,ie,Me,pa,Ct,Lo,fa,Io,Al,Ce,So,ha,Oo,No,ql,Dt,Fl,De,Ml,us,Ro,Cl,X,zt,Bo,cs,Go,Ho,Wo,Pt,Yo,ds,Uo,Jo,Ko,Lt,Qo,gs,Vo,Xo,Dl,It,zl,pe,ze,ma,St,Zo,ua,ei,Pl,_s,ti,Ll,Pe,Il,H,si,ca,ai,li,Ot,da,ni,ri,ga,oi,ii,Sl,Nt,Ol,$s,pi,Nl,Rt,Rl,Le,fi,ws,hi,mi,Bl,Bt,Gl,Ie,ui,Gt,_a,ci,di,Hl,Ht,Wl,Se,gi,Wt,$a,_i,$i,Yl,Yt,Ul,Oe,Jl;return d=new fe({}),W=new En({props:{id:"Vpjb1lu0MDk"}}),Y=new En({props:{id:"mqElG5QJWUg"}}),me=new Es({props:{$$slots:{default:[ff]},$$scope:{ctx:P}}}),Je=new fe({}),Ke=new q({props:{code:`from datasets import load_dataset eli5 = load_dataset("eli5", 
split="train_asks[:5000]")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>eli5 = load_dataset(<span class="hljs-string">&quot;eli5&quot;</span>, split=<span class="hljs-string">&quot;train_asks[:5000]&quot;</span>)`}}),Qe=new q({props:{code:"eli5 = eli5.train_test_split(test_size=0.2)",highlighted:'eli5 = eli5.train_test_split(test_size=<span class="hljs-number">0.2</span>)'}}),Ve=new q({props:{code:'eli5["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>eli5[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;answers&#x27;</span>: {<span class="hljs-string">&#x27;a_id&#x27;</span>: [<span class="hljs-string">&#x27;c3d1aib&#x27;</span>, <span class="hljs-string">&#x27;c3d4lya&#x27;</span>], <span class="hljs-string">&#x27;score&#x27;</span>: [<span class="hljs-number">6</span>, <span class="hljs-number">3</span>], <span class="hljs-string">&#x27;text&#x27;</span>: [<span class="hljs-string">&quot;The velocity needed to remain in orbit is equal to the square root of Newton&#x27;s constant times the mass of earth divided by the distance from the center of the earth. I don&#x27;t know the altitude of that specific mission, but they&#x27;re usually around 300 km. That means he&#x27;s going 7-8 km/s.\\n\\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. 
If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.&quot;</span>, <span class="hljs-string">&quot;Hope you don&#x27;t mind me asking another question, but why aren&#x27;t there any stars visible in this photo?&quot;</span>]}, <span class="hljs-string">&#x27;answers_urls&#x27;</span>: {<span class="hljs-string">&#x27;url&#x27;</span>: []}, <span class="hljs-string">&#x27;document&#x27;</span>: <span class="hljs-string">&#x27;&#x27;</span>, <span class="hljs-string">&#x27;q_id&#x27;</span>: <span class="hljs-string">&#x27;nyxfp&#x27;</span>, <span class="hljs-string">&#x27;selftext&#x27;</span>: <span class="hljs-string">&#x27;_URL_0_\\n\\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? 
And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?&#x27;</span>, <span class="hljs-string">&#x27;selftext_urls&#x27;</span>: {<span class="hljs-string">&#x27;url&#x27;</span>: [<span class="hljs-string">&#x27;http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg&#x27;</span>]}, <span class="hljs-string">&#x27;subreddit&#x27;</span>: <span class="hljs-string">&#x27;askscience&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;Few questions about this space walk photograph.&#x27;</span>, <span class="hljs-string">&#x27;title_urls&#x27;</span>: {<span class="hljs-string">&#x27;url&#x27;</span>: []}}`}}),Xe=new fe({}),Ze=new En({props:{id:"ma1TrR7gE7I"}}),et=new q({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilgpt2")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)`}}),tt=new En({props:{id:"8PmhEIXhBvI"}}),st=new q({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilroberta-base&quot;</span>)`}}),lt=new q({props:{code:`eli5 = eli5.flatten() eli5["train"][0]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>eli5 = eli5.flatten() <span class="hljs-meta">&gt;&gt;&gt; </span>eli5[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span 
class="hljs-string">&#x27;answers.a_id&#x27;</span>: [<span class="hljs-string">&#x27;c3d1aib&#x27;</span>, <span class="hljs-string">&#x27;c3d4lya&#x27;</span>], <span class="hljs-string">&#x27;answers.score&#x27;</span>: [<span class="hljs-number">6</span>, <span class="hljs-number">3</span>], <span class="hljs-string">&#x27;answers.text&#x27;</span>: [<span class="hljs-string">&quot;The velocity needed to remain in orbit is equal to the square root of Newton&#x27;s constant times the mass of earth divided by the distance from the center of the earth. I don&#x27;t know the altitude of that specific mission, but they&#x27;re usually around 300 km. That means he&#x27;s going 7-8 km/s.\\n\\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.&quot;</span>, <span class="hljs-string">&quot;Hope you don&#x27;t mind me asking another question, but why aren&#x27;t there any stars visible in this photo?&quot;</span>], <span class="hljs-string">&#x27;answers_urls.url&#x27;</span>: [], <span class="hljs-string">&#x27;document&#x27;</span>: <span class="hljs-string">&#x27;&#x27;</span>, <span class="hljs-string">&#x27;q_id&#x27;</span>: <span class="hljs-string">&#x27;nyxfp&#x27;</span>, <span class="hljs-string">&#x27;selftext&#x27;</span>: <span class="hljs-string">&#x27;_URL_0_\\n\\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? 
And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?&#x27;</span>, <span class="hljs-string">&#x27;selftext_urls.url&#x27;</span>: [<span class="hljs-string">&#x27;http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg&#x27;</span>], <span class="hljs-string">&#x27;subreddit&#x27;</span>: <span class="hljs-string">&#x27;askscience&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;Few questions about this space walk photograph.&#x27;</span>, <span class="hljs-string">&#x27;title_urls.url&#x27;</span>: []}`}}),nt=new q({props:{code:`def preprocess_function(examples): return tokenizer([" ".join(x) for x in examples["answers.text"]], truncation=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> tokenizer([<span class="hljs-string">&quot; &quot;</span>.join(x) <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;answers.text&quot;</span>]], truncation=<span class="hljs-literal">True</span>)`}}),ot=new q({props:{code:`tokenized_eli5 = eli5.map( preprocess_function, batched=True, num_proc=4, remove_columns=eli5["train"].column_names, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_eli5 = eli5.<span class="hljs-built_in">map</span>( <span class="hljs-meta">... </span> preprocess_function, <span class="hljs-meta">... </span> batched=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> num_proc=<span class="hljs-number">4</span>, <span class="hljs-meta">... </span> remove_columns=eli5[<span class="hljs-string">&quot;train&quot;</span>].column_names, <span class="hljs-meta">... 
</span>)`}}),pt=new q({props:{code:`block_size = 128 def group_texts(examples): concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>block_size = <span class="hljs-number">128</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">group_texts</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> concatenated_examples = {k: <span class="hljs-built_in">sum</span>(examples[k], []) <span class="hljs-keyword">for</span> k <span class="hljs-keyword">in</span> examples.keys()} <span class="hljs-meta">... </span> total_length = <span class="hljs-built_in">len</span>(concatenated_examples[<span class="hljs-built_in">list</span>(examples.keys())[<span class="hljs-number">0</span>]]) <span class="hljs-meta">... </span> result = { <span class="hljs-meta">... </span> k: [t[i : i + block_size] <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">0</span>, total_length, block_size)] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> k, t <span class="hljs-keyword">in</span> concatenated_examples.items() <span class="hljs-meta">... </span> } <span class="hljs-meta">... </span> result[<span class="hljs-string">&quot;labels&quot;</span>] = result[<span class="hljs-string">&quot;input_ids&quot;</span>].copy() <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> result`}}),ft=new q({props:{code:"lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>lm_dataset = tokenized_eli5.<span class="hljs-built_in">map</span>(group_texts, batched=<span class="hljs-literal">True</span>, num_proc=<span class="hljs-number">4</span>)'}}),ht=new lf({props:{group1:{id:"pt",code:`from transformers import DataCollatorForLanguageModeling tokenizer.pad_token = tokenizer.eos_token data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.pad_token = tokenizer.eos_token <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=<span class="hljs-literal">False</span>)`},group2:{id:"tf",code:`from transformers import DataCollatorForLanguageModeling data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}}),mt=new lf({props:{group1:{id:"pt",code:`from transformers import DataCollatorForLanguageModeling tokenizer.pad_token = tokenizer.eos_token data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.pad_token = tokenizer.eos_token <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=<span class="hljs-number">0.15</span>)`},group2:{id:"tf",code:`from transformers import DataCollatorForLanguageModeling data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=<span class="hljs-literal">False</span>, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}}),ut=new fe({}),dt=new fe({}),gt=new q({props:{code:`from transformers import AutoModelForCausalLM, TrainingArguments, Trainer model = AutoModelForCausalLM.from_pretrained("distilgpt2")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCausalLM, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)`}}),ye=new Es({props:{$$slots:{default:[hf]},$$scope:{ctx:P}}}),kt=new q({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=lm_dataset["train"], eval_dataset=lm_dataset["test"], data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... 
</span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),jt=new fe({}),Ee=new Es({props:{$$slots:{default:[mf]},$$scope:{ctx:P}}}),yt=new q({props:{code:`tf_train_set = lm_dataset["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], dummy_labels=True, shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_test_set = lm_dataset["test"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], dummy_labels=True, shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = lm_dataset[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... 
</span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = lm_dataset[<span class="hljs-string">&quot;test&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)`}}),bt=new q({props:{code:`from transformers import create_optimizer, AdamWeightDecay optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)`}}),Et=new q({props:{code:`from transformers import TFAutoModelForCausalLM model = TFAutoModelForCausalLM.from_pretrained("distilgpt2")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)`}}),Tt=new q({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),qt=new q({props:{code:"model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)'}}),Ft=new fe({}),Ct=new fe({}),Dt=new q({props:{code:`from transformers import AutoModelForMaskedLM model = AutoModelForMaskedLM.from_pretrained("distilroberta-base")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&quot;distilroberta-base&quot;</span>)`}}),De=new Es({props:{$$slots:{default:[uf]},$$scope:{ctx:P}}}),It=new q({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, num_train_epochs=3, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=lm_dataset["train"], eval_dataset=lm_dataset["test"], data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),St=new fe({}),Pe=new Es({props:{$$slots:{default:[cf]},$$scope:{ctx:P}}}),Nt=new q({props:{code:`tf_train_set = lm_dataset["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], dummy_labels=True, shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_test_set = lm_dataset["test"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], dummy_labels=True, shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = lm_dataset[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = lm_dataset[<span class="hljs-string">&quot;test&quot;</span>].to_tf_dataset( <span class="hljs-meta">... 
</span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)`}}),Rt=new q({props:{code:`from transformers import create_optimizer, AdamWeightDecay optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)`}}),Bt=new q({props:{code:`from transformers import TFAutoModelForMaskedLM model = TFAutoModelForCausalLM.from_pretrained("distilroberta-base")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilroberta-base&quot;</span>)`}}),Ht=new q({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),Yt=new q({props:{code:"model.fit(x=tf_train_set, validation_data=tf_test_set, 
epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)'}}),Oe=new Es({props:{$$slots:{default:[df]},$$scope:{ctx:P}}}),{c(){u=r("meta"),E=f(),c=r("h1"),v=r("a"),x=r("span"),g(d.$$.fragment),b=f(),A=r("span"),T=l("Language modeling"),y=f(),F=r("p"),C=l("Language modeling predicts words in a sentence. There are two forms of language modeling."),L=f(),g(W.$$.fragment),N=f(),U=r("p"),Ge=l("Causal language modeling predicts the next token in a sequence of tokens, and the model can only attend to tokens on the left."),R=f(),g(Y.$$.fragment),he=f(),M=r("p"),z=l("Masked language modeling predicts a masked token in a sequence, and the model can attend to tokens bidirectionally."),He=f(),D=r("p"),Jt=l("This guide will show you how to fine-tune "),I=r("a"),Kt=l("DistilGPT2"),Qt=l(" for causal language modeling and "),We=r("a"),xn=l("DistilRoBERTa"),Tn=l(" for masked language modeling on the "),Ye=r("a"),An=l("r/askscience"),qn=l(" subset of the "),Ue=r("a"),Fn=l("ELI5"),Mn=l(" dataset."),ba=f(),g(me.$$.fragment),Ea=f(),se=r("h2"),ue=r("a"),xs=r("span"),g(Je.$$.fragment),Cn=f(),Ts=r("span"),Dn=l("Load ELI5 dataset"),xa=f(),Vt=r("p"),zn=l("Load only the first 5000 rows of the ELI5 dataset from the \u{1F917} Datasets library since it is pretty large:"),Ta=f(),g(Ke.$$.fragment),Aa=f(),Xt=r("p"),Pn=l("Split this dataset into a train and test set:"),qa=f(),g(Qe.$$.fragment),Fa=f(),Zt=r("p"),Ln=l("Then take a look at an example:"),Ma=f(),g(Ve.$$.fragment),Ca=f(),B=r("p"),In=l("Notice "),As=r("code"),Sn=l("text"),On=l(" is a subfield nested inside the "),qs=r("code"),Nn=l("answers"),Rn=l(" dictionary. 
When you preprocess the dataset, you will need to extract the "),Fs=r("code"),Bn=l("text"),Gn=l(" subfield into a separate column."),Da=f(),ae=r("h2"),ce=r("a"),Ms=r("span"),g(Xe.$$.fragment),Hn=f(),Cs=r("span"),Wn=l("Preprocess"),za=f(),g(Ze.$$.fragment),Pa=f(),de=r("p"),Yn=l("For causal language modeling, load the DistilGPT2 tokenizer to process the "),Ds=r("code"),Un=l("text"),Jn=l(" subfield:"),La=f(),g(et.$$.fragment),Ia=f(),g(tt.$$.fragment),Sa=f(),es=r("p"),Kn=l("For masked language modeling, load the DistilRoBERTa tokenizer instead:"),Oa=f(),g(st.$$.fragment),Na=f(),J=r("p"),Qn=l("Extract the "),zs=r("code"),Vn=l("text"),Xn=l(" subfield from its nested structure with the "),at=r("a"),Ps=r("code"),Zn=l("flatten"),er=l(" method:"),Ra=f(),g(lt.$$.fragment),Ba=f(),K=r("p"),tr=l("Each subfield is now a separate column as indicated by the "),Ls=r("code"),sr=l("answers"),ar=l(" prefix. Notice that "),Is=r("code"),lr=l("answers.text"),nr=l(" is a list. Instead of tokenizing each sentence separately, convert the list to a string to jointly tokenize them."),Ga=f(),ts=r("p"),rr=l("Here is how you can create a preprocessing function to convert the list to a string and truncate sequences to be no longer than DistilGPT2\u2019s maximum input length:"),Ha=f(),g(nt.$$.fragment),Wa=f(),S=r("p"),or=l("Use \u{1F917} Datasets "),rt=r("a"),Ss=r("code"),ir=l("map"),pr=l(" function to apply the preprocessing function over the entire dataset. You can speed up the "),Os=r("code"),fr=l("map"),hr=l(" function by setting "),Ns=r("code"),mr=l("batched=True"),ur=l(" to process multiple elements of the dataset at once and increasing the number of processes with "),Rs=r("code"),cr=l("num_proc"),dr=l(". Remove the columns you don\u2019t need:"),Ya=f(),g(ot.$$.fragment),Ua=f(),ss=r("p"),gr=l("Now you need a second preprocessing function to capture text truncated from any lengthy examples to prevent loss of information. 
This preprocessing function should:"),Ja=f(),ge=r("ul"),Bs=r("li"),_r=l("Concatenate all the text."),$r=f(),it=r("li"),wr=l("Split the concatenated text into smaller chunks defined by "),Gs=r("code"),kr=l("block_size"),jr=l("."),Ka=f(),g(pt.$$.fragment),Qa=f(),_e=r("p"),vr=l("Apply the "),Hs=r("code"),yr=l("group_texts"),br=l(" function over the entire dataset:"),Va=f(),g(ft.$$.fragment),Xa=f(),O=r("p"),Er=l("For causal language modeling, use "),as=r("a"),xr=l("DataCollatorForLanguageModeling"),Tr=l(" to create a batch of examples. It will also "),Ws=r("em"),Ar=l("dynamically pad"),qr=l(" your text to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),Ys=r("code"),Fr=l("tokenizer"),Mr=l(" function by setting "),Us=r("code"),Cr=l("padding=True"),Dr=l(", dynamic padding is more efficient."),Za=f(),$e=r("p"),zr=l("You can use the end of sequence token as the padding token, and set "),Js=r("code"),Pr=l("mlm=False"),Lr=l(". This will use the inputs as labels shifted to the right by one element:"),el=f(),g(ht.$$.fragment),tl=f(),Q=r("p"),Ir=l("For masked language modeling, use the same "),ls=r("a"),Sr=l("DataCollatorForLanguageModeling"),Or=l(" except you should specify "),Ks=r("code"),Nr=l("mlm_probability"),Rr=l(" to randomly mask tokens each time you iterate over the data."),sl=f(),g(mt.$$.fragment),al=f(),le=r("h2"),we=r("a"),Qs=r("span"),g(ut.$$.fragment),Br=f(),Vs=r("span"),Gr=l("Causal language modeling"),ll=f(),ke=r("p"),Hr=l("Causal language modeling is frequently used for text generation. 
This section shows you how to fine-tune "),ct=r("a"),Wr=l("DistilGPT2"),Yr=l(" to generate new text."),nl=f(),ne=r("h3"),je=r("a"),Xs=r("span"),g(dt.$$.fragment),Ur=f(),Zs=r("span"),Jr=l("Fine-tune with Trainer"),rl=f(),ve=r("p"),Kr=l("Load DistilGPT2 with "),ns=r("a"),Qr=l("AutoModelForCausalLM"),Vr=l(":"),ol=f(),g(gt.$$.fragment),il=f(),g(ye.$$.fragment),pl=f(),rs=r("p"),Xr=l("At this point, only three steps remain:"),fl=f(),V=r("ol"),_t=r("li"),Zr=l("Define your training hyperparameters in "),os=r("a"),eo=l("TrainingArguments"),to=l("."),so=f(),$t=r("li"),ao=l("Pass the training arguments to "),is=r("a"),lo=l("Trainer"),no=l(" along with the model, datasets, and data collator."),ro=f(),wt=r("li"),oo=l("Call "),ps=r("a"),io=l("train()"),po=l(" to fine-tune your model."),hl=f(),g(kt.$$.fragment),ml=f(),re=r("h3"),be=r("a"),ea=r("span"),g(jt.$$.fragment),fo=f(),ta=r("span"),ho=l("Fine-tune with TensorFlow"),ul=f(),fs=r("p"),mo=l("To fine-tune a model in TensorFlow is just as easy, with only a few differences."),cl=f(),g(Ee.$$.fragment),dl=f(),G=r("p"),uo=l("Convert your datasets to the "),sa=r("code"),co=l("tf.data.Dataset"),go=l(" format with "),vt=r("a"),aa=r("code"),_o=l("to_tf_dataset"),$o=l(". 
Specify inputs and labels in "),la=r("code"),wo=l("columns"),ko=l(", whether to shuffle the dataset order, batch size, and the data collator:"),gl=f(),g(yt.$$.fragment),_l=f(),hs=r("p"),jo=l("Set up an optimizer function, learning rate, and some training hyperparameters:"),$l=f(),g(bt.$$.fragment),wl=f(),xe=r("p"),vo=l("Load DistilGPT2 with "),ms=r("a"),yo=l("TFAutoModelForCausalLM"),bo=l(":"),kl=f(),g(Et.$$.fragment),jl=f(),Te=r("p"),Eo=l("Configure the model for training with "),xt=r("a"),na=r("code"),xo=l("compile"),To=l(":"),vl=f(),g(Tt.$$.fragment),yl=f(),Ae=r("p"),Ao=l("Call "),At=r("a"),ra=r("code"),qo=l("fit"),Fo=l(" to fine-tune the model:"),bl=f(),g(qt.$$.fragment),El=f(),oe=r("h2"),qe=r("a"),oa=r("span"),g(Ft.$$.fragment),Mo=f(),ia=r("span"),Co=l("Masked language modeling"),xl=f(),Fe=r("p"),Do=l("Masked language modeling is also known as a fill-mask task because it predicts a masked token in a sequence. Models for masked language modeling require a good contextual understanding of an entire sequence instead of only the left context. 
This section shows you how to fine-tune "),Mt=r("a"),zo=l("DistilRoBERTa"),Po=l(" to predict a masked word."),Tl=f(),ie=r("h3"),Me=r("a"),pa=r("span"),g(Ct.$$.fragment),Lo=f(),fa=r("span"),Io=l("Fine-tune with Trainer"),Al=f(),Ce=r("p"),So=l("Load DistilRoBERTa with "),ha=r("code"),Oo=l("AutoModelForMaskedlM"),No=l(":"),ql=f(),g(Dt.$$.fragment),Fl=f(),g(De.$$.fragment),Ml=f(),us=r("p"),Ro=l("At this point, only three steps remain:"),Cl=f(),X=r("ol"),zt=r("li"),Bo=l("Define your training hyperparameters in "),cs=r("a"),Go=l("TrainingArguments"),Ho=l("."),Wo=f(),Pt=r("li"),Yo=l("Pass the training arguments to "),ds=r("a"),Uo=l("Trainer"),Jo=l(" along with the model, datasets, and data collator."),Ko=f(),Lt=r("li"),Qo=l("Call "),gs=r("a"),Vo=l("train()"),Xo=l(" to fine-tune your model."),Dl=f(),g(It.$$.fragment),zl=f(),pe=r("h3"),ze=r("a"),ma=r("span"),g(St.$$.fragment),Zo=f(),ua=r("span"),ei=l("Fine-tune with TensorFlow"),Pl=f(),_s=r("p"),ti=l("To fine-tune a model in TensorFlow is just as easy, with only a few differences."),Ll=f(),g(Pe.$$.fragment),Il=f(),H=r("p"),si=l("Convert your datasets to the "),ca=r("code"),ai=l("tf.data.Dataset"),li=l(" format with "),Ot=r("a"),da=r("code"),ni=l("to_tf_dataset"),ri=l(". 
Specify inputs and labels in "),ga=r("code"),oi=l("columns"),ii=l(", whether to shuffle the dataset order, batch size, and the data collator:"),Sl=f(),g(Nt.$$.fragment),Ol=f(),$s=r("p"),pi=l("Set up an optimizer function, learning rate, and some training hyperparameters:"),Nl=f(),g(Rt.$$.fragment),Rl=f(),Le=r("p"),fi=l("Load DistilRoBERTa with "),ws=r("a"),hi=l("TFAutoModelForMaskedLM"),mi=l(":"),Bl=f(),g(Bt.$$.fragment),Gl=f(),Ie=r("p"),ui=l("Configure the model for training with "),Gt=r("a"),_a=r("code"),ci=l("compile"),di=l(":"),Hl=f(),g(Ht.$$.fragment),Wl=f(),Se=r("p"),gi=l("Call "),Wt=r("a"),$a=r("code"),_i=l("fit"),$i=l(" to fine-tune the model:"),Yl=f(),g(Yt.$$.fragment),Ul=f(),g(Oe.$$.fragment),this.h()},l(e){const a=pf('[data-svelte="svelte-1phssyn"]',document.head);u=o(a,"META",{name:!0,content:!0}),a.forEach(t),E=h(e),c=o(e,"H1",{class:!0});var Ut=i(c);v=o(Ut,"A",{id:!0,class:!0,href:!0});var wa=i(v);x=o(wa,"SPAN",{});var ka=i(x);_(d.$$.fragment,ka),ka.forEach(t),wa.forEach(t),b=h(Ut),A=o(Ut,"SPAN",{});var ja=i(A);T=n(ja,"Language modeling"),ja.forEach(t),Ut.forEach(t),y=h(e),F=o(e,"P",{});var va=i(F);C=n(va,"Language modeling predicts words in a sentence. 
There are two forms of language modeling."),va.forEach(t),L=h(e),_(W.$$.fragment,e),N=h(e),U=o(e,"P",{});var ya=i(U);Ge=n(ya,"Causal language modeling predicts the next token in a sequence of tokens, and the model can only attend to tokens on the left."),ya.forEach(t),R=h(e),_(Y.$$.fragment,e),he=h(e),M=o(e,"P",{});var wi=i(M);z=n(wi,"Masked language modeling predicts a masked token in a sequence, and the model can attend to tokens bidirectionally."),wi.forEach(t),He=h(e),D=o(e,"P",{});var Z=i(D);Jt=n(Z,"This guide will show you how to fine-tune "),I=o(Z,"A",{href:!0,rel:!0});var ki=i(I);Kt=n(ki,"DistilGPT2"),ki.forEach(t),Qt=n(Z," for causal language modeling and "),We=o(Z,"A",{href:!0,rel:!0});var ji=i(We);xn=n(ji,"DistilRoBERTa"),ji.forEach(t),Tn=n(Z," for masked language modeling on the "),Ye=o(Z,"A",{href:!0,rel:!0});var vi=i(Ye);An=n(vi,"r/askscience"),vi.forEach(t),qn=n(Z," subset of the "),Ue=o(Z,"A",{href:!0,rel:!0});var yi=i(Ue);Fn=n(yi,"ELI5"),yi.forEach(t),Mn=n(Z," dataset."),Z.forEach(t),ba=h(e),_(me.$$.fragment,e),Ea=h(e),se=o(e,"H2",{class:!0});var Kl=i(se);ue=o(Kl,"A",{id:!0,class:!0,href:!0});var bi=i(ue);xs=o(bi,"SPAN",{});var Ei=i(xs);_(Je.$$.fragment,Ei),Ei.forEach(t),bi.forEach(t),Cn=h(Kl),Ts=o(Kl,"SPAN",{});var xi=i(Ts);Dn=n(xi,"Load ELI5 dataset"),xi.forEach(t),Kl.forEach(t),xa=h(e),Vt=o(e,"P",{});var Ti=i(Vt);zn=n(Ti,"Load only the first 5000 rows of the ELI5 dataset from the \u{1F917} Datasets library since it is pretty large:"),Ti.forEach(t),Ta=h(e),_(Ke.$$.fragment,e),Aa=h(e),Xt=o(e,"P",{});var Ai=i(Xt);Pn=n(Ai,"Split this dataset into a train and test set:"),Ai.forEach(t),qa=h(e),_(Qe.$$.fragment,e),Fa=h(e),Zt=o(e,"P",{});var qi=i(Zt);Ln=n(qi,"Then take a look at an example:"),qi.forEach(t),Ma=h(e),_(Ve.$$.fragment,e),Ca=h(e),B=o(e,"P",{});var Ne=i(B);In=n(Ne,"Notice "),As=o(Ne,"CODE",{});var Fi=i(As);Sn=n(Fi,"text"),Fi.forEach(t),On=n(Ne," is a subfield nested inside the "),qs=o(Ne,"CODE",{});var 
Mi=i(qs);Nn=n(Mi,"answers"),Mi.forEach(t),Rn=n(Ne," dictionary. When you preprocess the dataset, you will need to extract the "),Fs=o(Ne,"CODE",{});var Ci=i(Fs);Bn=n(Ci,"text"),Ci.forEach(t),Gn=n(Ne," subfield into a separate column."),Ne.forEach(t),Da=h(e),ae=o(e,"H2",{class:!0});var Ql=i(ae);ce=o(Ql,"A",{id:!0,class:!0,href:!0});var Di=i(ce);Ms=o(Di,"SPAN",{});var zi=i(Ms);_(Xe.$$.fragment,zi),zi.forEach(t),Di.forEach(t),Hn=h(Ql),Cs=o(Ql,"SPAN",{});var Pi=i(Cs);Wn=n(Pi,"Preprocess"),Pi.forEach(t),Ql.forEach(t),za=h(e),_(Ze.$$.fragment,e),Pa=h(e),de=o(e,"P",{});var Vl=i(de);Yn=n(Vl,"For causal language modeling, load the DistilGPT2 tokenizer to process the "),Ds=o(Vl,"CODE",{});var Li=i(Ds);Un=n(Li,"text"),Li.forEach(t),Jn=n(Vl," subfield:"),Vl.forEach(t),La=h(e),_(et.$$.fragment,e),Ia=h(e),_(tt.$$.fragment,e),Sa=h(e),es=o(e,"P",{});var Ii=i(es);Kn=n(Ii,"For masked language modeling, load the DistilRoBERTa tokenizer instead:"),Ii.forEach(t),Oa=h(e),_(st.$$.fragment,e),Na=h(e),J=o(e,"P",{});var ks=i(J);Qn=n(ks,"Extract the "),zs=o(ks,"CODE",{});var Si=i(zs);Vn=n(Si,"text"),Si.forEach(t),Xn=n(ks," subfield from its nested structure with the "),at=o(ks,"A",{href:!0,rel:!0});var Oi=i(at);Ps=o(Oi,"CODE",{});var Ni=i(Ps);Zn=n(Ni,"flatten"),Ni.forEach(t),Oi.forEach(t),er=n(ks," method:"),ks.forEach(t),Ra=h(e),_(lt.$$.fragment,e),Ba=h(e),K=o(e,"P",{});var js=i(K);tr=n(js,"Each subfield is now a separate column as indicated by the "),Ls=o(js,"CODE",{});var Ri=i(Ls);sr=n(Ri,"answers"),Ri.forEach(t),ar=n(js," prefix. Notice that "),Is=o(js,"CODE",{});var Bi=i(Is);lr=n(Bi,"answers.text"),Bi.forEach(t),nr=n(js," is a list. 
Instead of tokenizing each sentence separately, convert the list to a string to jointly tokenize them."),js.forEach(t),Ga=h(e),ts=o(e,"P",{});var Gi=i(ts);rr=n(Gi,"Here is how you can create a preprocessing function to convert the list to a string and truncate sequences to be no longer than DistilGPT2\u2019s maximum input length:"),Gi.forEach(t),Ha=h(e),_(nt.$$.fragment,e),Wa=h(e),S=o(e,"P",{});var ee=i(S);or=n(ee,"Use \u{1F917} Datasets "),rt=o(ee,"A",{href:!0,rel:!0});var Hi=i(rt);Ss=o(Hi,"CODE",{});var Wi=i(Ss);ir=n(Wi,"map"),Wi.forEach(t),Hi.forEach(t),pr=n(ee," function to apply the preprocessing function over the entire dataset. You can speed up the "),Os=o(ee,"CODE",{});var Yi=i(Os);fr=n(Yi,"map"),Yi.forEach(t),hr=n(ee," function by setting "),Ns=o(ee,"CODE",{});var Ui=i(Ns);mr=n(Ui,"batched=True"),Ui.forEach(t),ur=n(ee," to process multiple elements of the dataset at once and increasing the number of processes with "),Rs=o(ee,"CODE",{});var Ji=i(Rs);cr=n(Ji,"num_proc"),Ji.forEach(t),dr=n(ee,". Remove the columns you don\u2019t need:"),ee.forEach(t),Ya=h(e),_(ot.$$.fragment,e),Ua=h(e),ss=o(e,"P",{});var Ki=i(ss);gr=n(Ki,"Now you need a second preprocessing function to capture text truncated from any lengthy examples to prevent loss of information. 
This preprocessing function should:"),Ki.forEach(t),Ja=h(e),ge=o(e,"UL",{});var Xl=i(ge);Bs=o(Xl,"LI",{});var Qi=i(Bs);_r=n(Qi,"Concatenate all the text."),Qi.forEach(t),$r=h(Xl),it=o(Xl,"LI",{});var Zl=i(it);wr=n(Zl,"Split the concatenated text into smaller chunks defined by "),Gs=o(Zl,"CODE",{});var Vi=i(Gs);kr=n(Vi,"block_size"),Vi.forEach(t),jr=n(Zl,"."),Zl.forEach(t),Xl.forEach(t),Ka=h(e),_(pt.$$.fragment,e),Qa=h(e),_e=o(e,"P",{});var en=i(_e);vr=n(en,"Apply the "),Hs=o(en,"CODE",{});var Xi=i(Hs);yr=n(Xi,"group_texts"),Xi.forEach(t),br=n(en," function over the entire dataset:"),en.forEach(t),Va=h(e),_(ft.$$.fragment,e),Xa=h(e),O=o(e,"P",{});var te=i(O);Er=n(te,"For causal language modeling, use "),as=o(te,"A",{href:!0});var Zi=i(as);xr=n(Zi,"DataCollatorForLanguageModeling"),Zi.forEach(t),Tr=n(te," to create a batch of examples. It will also "),Ws=o(te,"EM",{});var ep=i(Ws);Ar=n(ep,"dynamically pad"),ep.forEach(t),qr=n(te," your text to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),Ys=o(te,"CODE",{});var tp=i(Ys);Fr=n(tp,"tokenizer"),tp.forEach(t),Mr=n(te," function by setting "),Us=o(te,"CODE",{});var sp=i(Us);Cr=n(sp,"padding=True"),sp.forEach(t),Dr=n(te,", dynamic padding is more efficient."),te.forEach(t),Za=h(e),$e=o(e,"P",{});var tn=i($e);zr=n(tn,"You can use the end of sequence token as the padding token, and set "),Js=o(tn,"CODE",{});var ap=i(Js);Pr=n(ap,"mlm=False"),ap.forEach(t),Lr=n(tn,". 
This will use the inputs as labels shifted to the right by one element:"),tn.forEach(t),el=h(e),_(ht.$$.fragment,e),tl=h(e),Q=o(e,"P",{});var vs=i(Q);Ir=n(vs,"For masked language modeling, use the same "),ls=o(vs,"A",{href:!0});var lp=i(ls);Sr=n(lp,"DataCollatorForLanguageModeling"),lp.forEach(t),Or=n(vs," except you should specify "),Ks=o(vs,"CODE",{});var np=i(Ks);Nr=n(np,"mlm_probability"),np.forEach(t),Rr=n(vs," to randomly mask tokens each time you iterate over the data."),vs.forEach(t),sl=h(e),_(mt.$$.fragment,e),al=h(e),le=o(e,"H2",{class:!0});var sn=i(le);we=o(sn,"A",{id:!0,class:!0,href:!0});var rp=i(we);Qs=o(rp,"SPAN",{});var op=i(Qs);_(ut.$$.fragment,op),op.forEach(t),rp.forEach(t),Br=h(sn),Vs=o(sn,"SPAN",{});var ip=i(Vs);Gr=n(ip,"Causal language modeling"),ip.forEach(t),sn.forEach(t),ll=h(e),ke=o(e,"P",{});var an=i(ke);Hr=n(an,"Causal language modeling is frequently used for text generation. This section shows you how to fine-tune "),ct=o(an,"A",{href:!0,rel:!0});var pp=i(ct);Wr=n(pp,"DistilGPT2"),pp.forEach(t),Yr=n(an," to generate new text."),an.forEach(t),nl=h(e),ne=o(e,"H3",{class:!0});var ln=i(ne);je=o(ln,"A",{id:!0,class:!0,href:!0});var fp=i(je);Xs=o(fp,"SPAN",{});var hp=i(Xs);_(dt.$$.fragment,hp),hp.forEach(t),fp.forEach(t),Ur=h(ln),Zs=o(ln,"SPAN",{});var mp=i(Zs);Jr=n(mp,"Fine-tune with Trainer"),mp.forEach(t),ln.forEach(t),rl=h(e),ve=o(e,"P",{});var nn=i(ve);Kr=n(nn,"Load DistilGPT2 with "),ns=o(nn,"A",{href:!0});var up=i(ns);Qr=n(up,"AutoModelForCausalLM"),up.forEach(t),Vr=n(nn,":"),nn.forEach(t),ol=h(e),_(gt.$$.fragment,e),il=h(e),_(ye.$$.fragment,e),pl=h(e),rs=o(e,"P",{});var cp=i(rs);Xr=n(cp,"At this point, only three steps remain:"),cp.forEach(t),fl=h(e),V=o(e,"OL",{});var ys=i(V);_t=o(ys,"LI",{});var rn=i(_t);Zr=n(rn,"Define your training hyperparameters in "),os=o(rn,"A",{href:!0});var dp=i(os);eo=n(dp,"TrainingArguments"),dp.forEach(t),to=n(rn,"."),rn.forEach(t),so=h(ys),$t=o(ys,"LI",{});var on=i($t);ao=n(on,"Pass the training 
arguments to "),is=o(on,"A",{href:!0});var gp=i(is);lo=n(gp,"Trainer"),gp.forEach(t),no=n(on," along with the model, datasets, and data collator."),on.forEach(t),ro=h(ys),wt=o(ys,"LI",{});var pn=i(wt);oo=n(pn,"Call "),ps=o(pn,"A",{href:!0});var _p=i(ps);io=n(_p,"train()"),_p.forEach(t),po=n(pn," to fine-tune your model."),pn.forEach(t),ys.forEach(t),hl=h(e),_(kt.$$.fragment,e),ml=h(e),re=o(e,"H3",{class:!0});var fn=i(re);be=o(fn,"A",{id:!0,class:!0,href:!0});var $p=i(be);ea=o($p,"SPAN",{});var wp=i(ea);_(jt.$$.fragment,wp),wp.forEach(t),$p.forEach(t),fo=h(fn),ta=o(fn,"SPAN",{});var kp=i(ta);ho=n(kp,"Fine-tune with TensorFlow"),kp.forEach(t),fn.forEach(t),ul=h(e),fs=o(e,"P",{});var jp=i(fs);mo=n(jp,"To fine-tune a model in TensorFlow is just as easy, with only a few differences."),jp.forEach(t),cl=h(e),_(Ee.$$.fragment,e),dl=h(e),G=o(e,"P",{});var Re=i(G);uo=n(Re,"Convert your datasets to the "),sa=o(Re,"CODE",{});var vp=i(sa);co=n(vp,"tf.data.Dataset"),vp.forEach(t),go=n(Re," format with "),vt=o(Re,"A",{href:!0,rel:!0});var yp=i(vt);aa=o(yp,"CODE",{});var bp=i(aa);_o=n(bp,"to_tf_dataset"),bp.forEach(t),yp.forEach(t),$o=n(Re,". 
Specify inputs and labels in "),la=o(Re,"CODE",{});var Ep=i(la);wo=n(Ep,"columns"),Ep.forEach(t),ko=n(Re,", whether to shuffle the dataset order, batch size, and the data collator:"),Re.forEach(t),gl=h(e),_(yt.$$.fragment,e),_l=h(e),hs=o(e,"P",{});var xp=i(hs);jo=n(xp,"Set up an optimizer function, learning rate, and some training hyperparameters:"),xp.forEach(t),$l=h(e),_(bt.$$.fragment,e),wl=h(e),xe=o(e,"P",{});var hn=i(xe);vo=n(hn,"Load DistilGPT2 with "),ms=o(hn,"A",{href:!0});var Tp=i(ms);yo=n(Tp,"TFAutoModelForCausalLM"),Tp.forEach(t),bo=n(hn,":"),hn.forEach(t),kl=h(e),_(Et.$$.fragment,e),jl=h(e),Te=o(e,"P",{});var mn=i(Te);Eo=n(mn,"Configure the model for training with "),xt=o(mn,"A",{href:!0,rel:!0});var Ap=i(xt);na=o(Ap,"CODE",{});var qp=i(na);xo=n(qp,"compile"),qp.forEach(t),Ap.forEach(t),To=n(mn,":"),mn.forEach(t),vl=h(e),_(Tt.$$.fragment,e),yl=h(e),Ae=o(e,"P",{});var un=i(Ae);Ao=n(un,"Call "),At=o(un,"A",{href:!0,rel:!0});var Fp=i(At);ra=o(Fp,"CODE",{});var Mp=i(ra);qo=n(Mp,"fit"),Mp.forEach(t),Fp.forEach(t),Fo=n(un," to fine-tune the model:"),un.forEach(t),bl=h(e),_(qt.$$.fragment,e),El=h(e),oe=o(e,"H2",{class:!0});var cn=i(oe);qe=o(cn,"A",{id:!0,class:!0,href:!0});var Cp=i(qe);oa=o(Cp,"SPAN",{});var Dp=i(oa);_(Ft.$$.fragment,Dp),Dp.forEach(t),Cp.forEach(t),Mo=h(cn),ia=o(cn,"SPAN",{});var zp=i(ia);Co=n(zp,"Masked language modeling"),zp.forEach(t),cn.forEach(t),xl=h(e),Fe=o(e,"P",{});var dn=i(Fe);Do=n(dn,"Masked language modeling is also known as a fill-mask task because it predicts a masked token in a sequence. Models for masked language modeling require a good contextual understanding of an entire sequence instead of only the left context. 
This section shows you how to fine-tune "),Mt=o(dn,"A",{href:!0,rel:!0});var Pp=i(Mt);zo=n(Pp,"DistilRoBERTa"),Pp.forEach(t),Po=n(dn," to predict a masked word."),dn.forEach(t),Tl=h(e),ie=o(e,"H3",{class:!0});var gn=i(ie);Me=o(gn,"A",{id:!0,class:!0,href:!0});var Lp=i(Me);pa=o(Lp,"SPAN",{});var Ip=i(pa);_(Ct.$$.fragment,Ip),Ip.forEach(t),Lp.forEach(t),Lo=h(gn),fa=o(gn,"SPAN",{});var Sp=i(fa);Io=n(Sp,"Fine-tune with Trainer"),Sp.forEach(t),gn.forEach(t),Al=h(e),Ce=o(e,"P",{});var _n=i(Ce);So=n(_n,"Load DistilRoBERTa with "),ha=o(_n,"CODE",{});var Op=i(ha);Oo=n(Op,"AutoModelForMaskedlM"),Op.forEach(t),No=n(_n,":"),_n.forEach(t),ql=h(e),_(Dt.$$.fragment,e),Fl=h(e),_(De.$$.fragment,e),Ml=h(e),us=o(e,"P",{});var Np=i(us);Ro=n(Np,"At this point, only three steps remain:"),Np.forEach(t),Cl=h(e),X=o(e,"OL",{});var bs=i(X);zt=o(bs,"LI",{});var $n=i(zt);Bo=n($n,"Define your training hyperparameters in "),cs=o($n,"A",{href:!0});var Rp=i(cs);Go=n(Rp,"TrainingArguments"),Rp.forEach(t),Ho=n($n,"."),$n.forEach(t),Wo=h(bs),Pt=o(bs,"LI",{});var wn=i(Pt);Yo=n(wn,"Pass the training arguments to "),ds=o(wn,"A",{href:!0});var Bp=i(ds);Uo=n(Bp,"Trainer"),Bp.forEach(t),Jo=n(wn," along with the model, datasets, and data collator."),wn.forEach(t),Ko=h(bs),Lt=o(bs,"LI",{});var kn=i(Lt);Qo=n(kn,"Call "),gs=o(kn,"A",{href:!0});var Gp=i(gs);Vo=n(Gp,"train()"),Gp.forEach(t),Xo=n(kn," to fine-tune your model."),kn.forEach(t),bs.forEach(t),Dl=h(e),_(It.$$.fragment,e),zl=h(e),pe=o(e,"H3",{class:!0});var jn=i(pe);ze=o(jn,"A",{id:!0,class:!0,href:!0});var Hp=i(ze);ma=o(Hp,"SPAN",{});var Wp=i(ma);_(St.$$.fragment,Wp),Wp.forEach(t),Hp.forEach(t),Zo=h(jn),ua=o(jn,"SPAN",{});var Yp=i(ua);ei=n(Yp,"Fine-tune with TensorFlow"),Yp.forEach(t),jn.forEach(t),Pl=h(e),_s=o(e,"P",{});var Up=i(_s);ti=n(Up,"To fine-tune a model in TensorFlow is just as easy, with only a few differences."),Up.forEach(t),Ll=h(e),_(Pe.$$.fragment,e),Il=h(e),H=o(e,"P",{});var Be=i(H);si=n(Be,"Convert your datasets to the 
"),ca=o(Be,"CODE",{});var Jp=i(ca);ai=n(Jp,"tf.data.Dataset"),Jp.forEach(t),li=n(Be," format with "),Ot=o(Be,"A",{href:!0,rel:!0});var Kp=i(Ot);da=o(Kp,"CODE",{});var Qp=i(da);ni=n(Qp,"to_tf_dataset"),Qp.forEach(t),Kp.forEach(t),ri=n(Be,". Specify inputs and labels in "),ga=o(Be,"CODE",{});var Vp=i(ga);oi=n(Vp,"columns"),Vp.forEach(t),ii=n(Be,", whether to shuffle the dataset order, batch size, and the data collator:"),Be.forEach(t),Sl=h(e),_(Nt.$$.fragment,e),Ol=h(e),$s=o(e,"P",{});var Xp=i($s);pi=n(Xp,"Set up an optimizer function, learning rate, and some training hyperparameters:"),Xp.forEach(t),Nl=h(e),_(Rt.$$.fragment,e),Rl=h(e),Le=o(e,"P",{});var vn=i(Le);fi=n(vn,"Load DistilRoBERTa with "),ws=o(vn,"A",{href:!0});var Zp=i(ws);hi=n(Zp,"TFAutoModelForMaskedLM"),Zp.forEach(t),mi=n(vn,":"),vn.forEach(t),Bl=h(e),_(Bt.$$.fragment,e),Gl=h(e),Ie=o(e,"P",{});var yn=i(Ie);ui=n(yn,"Configure the model for training with "),Gt=o(yn,"A",{href:!0,rel:!0});var ef=i(Gt);_a=o(ef,"CODE",{});var tf=i(_a);ci=n(tf,"compile"),tf.forEach(t),ef.forEach(t),di=n(yn,":"),yn.forEach(t),Hl=h(e),_(Ht.$$.fragment,e),Wl=h(e),Se=o(e,"P",{});var bn=i(Se);gi=n(bn,"Call "),Wt=o(bn,"A",{href:!0,rel:!0});var sf=i(Wt);$a=o(sf,"CODE",{});var af=i($a);_i=n(af,"fit"),af.forEach(t),sf.forEach(t),$i=n(bn," to fine-tune the model:"),bn.forEach(t),Yl=h(e),_(Yt.$$.fragment,e),Ul=h(e),_(Oe.$$.fragment,e),this.h()},h(){m(u,"name","hf:doc:metadata"),m(u,"content",JSON.stringify(_f)),m(v,"id","language-modeling"),m(v,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(v,"href","#language-modeling"),m(c,"class","relative 
group"),m(I,"href","https://huggingface.co/distilgpt2"),m(I,"rel","nofollow"),m(We,"href","https://huggingface.co/distilroberta-base"),m(We,"rel","nofollow"),m(Ye,"href","https://www.reddit.com/r/askscience/"),m(Ye,"rel","nofollow"),m(Ue,"href","https://huggingface.co/datasets/eli5"),m(Ue,"rel","nofollow"),m(ue,"id","load-eli5-dataset"),m(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ue,"href","#load-eli5-dataset"),m(se,"class","relative group"),m(ce,"id","preprocess"),m(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ce,"href","#preprocess"),m(ae,"class","relative group"),m(at,"href","https://huggingface.co/docs/datasets/process.html#flatten"),m(at,"rel","nofollow"),m(rt,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map"),m(rt,"rel","nofollow"),m(as,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling"),m(ls,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling"),m(we,"id","causal-language-modeling"),m(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(we,"href","#causal-language-modeling"),m(le,"class","relative group"),m(ct,"href","https://huggingface.co/distilgpt2"),m(ct,"rel","nofollow"),m(je,"id","finetune-with-trainer"),m(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(je,"href","#finetune-with-trainer"),m(ne,"class","relative 
group"),m(ns,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForCausalLM"),m(os,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),m(is,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(ps,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train"),m(be,"id","finetune-with-tensorflow"),m(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(be,"href","#finetune-with-tensorflow"),m(re,"class","relative group"),m(vt,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),m(vt,"rel","nofollow"),m(ms,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForCausalLM"),m(xt,"href","https://keras.io/api/models/model_training_apis/#compile-method"),m(xt,"rel","nofollow"),m(At,"href","https://keras.io/api/models/model_training_apis/#fit-method"),m(At,"rel","nofollow"),m(qe,"id","masked-language-modeling"),m(qe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(qe,"href","#masked-language-modeling"),m(oe,"class","relative group"),m(Mt,"href","https://huggingface.co/distilroberta-base"),m(Mt,"rel","nofollow"),m(Me,"id","finetune-with-trainer"),m(Me,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Me,"href","#finetune-with-trainer"),m(ie,"class","relative 
group"),m(cs,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),m(ds,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(gs,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train"),m(ze,"id","finetune-with-tensorflow"),m(ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(ze,"href","#finetune-with-tensorflow"),m(pe,"class","relative group"),m(Ot,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),m(Ot,"rel","nofollow"),m(ws,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForMaskedLM"),m(Gt,"href","https://keras.io/api/models/model_training_apis/#compile-method"),m(Gt,"rel","nofollow"),m(Wt,"href","https://keras.io/api/models/model_training_apis/#fit-method"),m(Wt,"rel","nofollow")},m(e,a){s(document.head,u),p(e,E,a),p(e,c,a),s(c,v),s(v,x),$(d,x,null),s(c,b),s(c,A),s(A,T),p(e,y,a),p(e,F,a),s(F,C),p(e,L,a),$(W,e,a),p(e,N,a),p(e,U,a),s(U,Ge),p(e,R,a),$(Y,e,a),p(e,he,a),p(e,M,a),s(M,z),p(e,He,a),p(e,D,a),s(D,Jt),s(D,I),s(I,Kt),s(D,Qt),s(D,We),s(We,xn),s(D,Tn),s(D,Ye),s(Ye,An),s(D,qn),s(D,Ue),s(Ue,Fn),s(D,Mn),p(e,ba,a),$(me,e,a),p(e,Ea,a),p(e,se,a),s(se,ue),s(ue,xs),$(Je,xs,null),s(se,Cn),s(se,Ts),s(Ts,Dn),p(e,xa,a),p(e,Vt,a),s(Vt,zn),p(e,Ta,a),$(Ke,e,a),p(e,Aa,a),p(e,Xt,a),s(Xt,Pn),p(e,qa,a),$(Qe,e,a),p(e,Fa,a),p(e,Zt,a),s(Zt,Ln),p(e,Ma,a),$(Ve,e,a),p(e,Ca,a),p(e,B,a),s(B,In),s(B,As),s(As,Sn),s(B,On),s(B,qs),s(qs,Nn),s(B,Rn),s(B,Fs),s(Fs,Bn),s(B,Gn),p(e,Da,a),p(e,ae,a),s(ae,ce),s(ce,Ms),$(Xe,Ms,null),s(ae,Hn),s(ae,Cs),s(Cs,Wn),p(e,za,a),$(Ze,e,a),p(e,Pa,a),p(e,de,a),s(de,Yn),s(de,Ds),s(Ds,Un),s(de,Jn),p(e,La,a),$(et,e,a),p(e,Ia,a),$(tt,e,a),p(e,Sa,a),p(e,es,a),s(es,Kn),p(e,Oa,a),$(st,e,a),p(e,Na,a),p(e,J,a),s(J,Qn),s(J,zs),s(zs,Vn),s(J,Xn),s(J,at),s
(at,Ps),s(Ps,Zn),s(J,er),p(e,Ra,a),$(lt,e,a),p(e,Ba,a),p(e,K,a),s(K,tr),s(K,Ls),s(Ls,sr),s(K,ar),s(K,Is),s(Is,lr),s(K,nr),p(e,Ga,a),p(e,ts,a),s(ts,rr),p(e,Ha,a),$(nt,e,a),p(e,Wa,a),p(e,S,a),s(S,or),s(S,rt),s(rt,Ss),s(Ss,ir),s(S,pr),s(S,Os),s(Os,fr),s(S,hr),s(S,Ns),s(Ns,mr),s(S,ur),s(S,Rs),s(Rs,cr),s(S,dr),p(e,Ya,a),$(ot,e,a),p(e,Ua,a),p(e,ss,a),s(ss,gr),p(e,Ja,a),p(e,ge,a),s(ge,Bs),s(Bs,_r),s(ge,$r),s(ge,it),s(it,wr),s(it,Gs),s(Gs,kr),s(it,jr),p(e,Ka,a),$(pt,e,a),p(e,Qa,a),p(e,_e,a),s(_e,vr),s(_e,Hs),s(Hs,yr),s(_e,br),p(e,Va,a),$(ft,e,a),p(e,Xa,a),p(e,O,a),s(O,Er),s(O,as),s(as,xr),s(O,Tr),s(O,Ws),s(Ws,Ar),s(O,qr),s(O,Ys),s(Ys,Fr),s(O,Mr),s(O,Us),s(Us,Cr),s(O,Dr),p(e,Za,a),p(e,$e,a),s($e,zr),s($e,Js),s(Js,Pr),s($e,Lr),p(e,el,a),$(ht,e,a),p(e,tl,a),p(e,Q,a),s(Q,Ir),s(Q,ls),s(ls,Sr),s(Q,Or),s(Q,Ks),s(Ks,Nr),s(Q,Rr),p(e,sl,a),$(mt,e,a),p(e,al,a),p(e,le,a),s(le,we),s(we,Qs),$(ut,Qs,null),s(le,Br),s(le,Vs),s(Vs,Gr),p(e,ll,a),p(e,ke,a),s(ke,Hr),s(ke,ct),s(ct,Wr),s(ke,Yr),p(e,nl,a),p(e,ne,a),s(ne,je),s(je,Xs),$(dt,Xs,null),s(ne,Ur),s(ne,Zs),s(Zs,Jr),p(e,rl,a),p(e,ve,a),s(ve,Kr),s(ve,ns),s(ns,Qr),s(ve,Vr),p(e,ol,a),$(gt,e,a),p(e,il,a),$(ye,e,a),p(e,pl,a),p(e,rs,a),s(rs,Xr),p(e,fl,a),p(e,V,a),s(V,_t),s(_t,Zr),s(_t,os),s(os,eo),s(_t,to),s(V,so),s(V,$t),s($t,ao),s($t,is),s(is,lo),s($t,no),s(V,ro),s(V,wt),s(wt,oo),s(wt,ps),s(ps,io),s(wt,po),p(e,hl,a),$(kt,e,a),p(e,ml,a),p(e,re,a),s(re,be),s(be,ea),$(jt,ea,null),s(re,fo),s(re,ta),s(ta,ho),p(e,ul,a),p(e,fs,a),s(fs,mo),p(e,cl,a),$(Ee,e,a),p(e,dl,a),p(e,G,a),s(G,uo),s(G,sa),s(sa,co),s(G,go),s(G,vt),s(vt,aa),s(aa,_o),s(G,$o),s(G,la),s(la,wo),s(G,ko),p(e,gl,a),$(yt,e,a),p(e,_l,a),p(e,hs,a),s(hs,jo),p(e,$l,a),$(bt,e,a),p(e,wl,a),p(e,xe,a),s(xe,vo),s(xe,ms),s(ms,yo),s(xe,bo),p(e,kl,a),$(Et,e,a),p(e,jl,a),p(e,Te,a),s(Te,Eo),s(Te,xt),s(xt,na),s(na,xo),s(Te,To),p(e,vl,a),$(Tt,e,a),p(e,yl,a),p(e,Ae,a),s(Ae,Ao),s(Ae,At),s(At,ra),s(ra,qo),s(Ae,Fo),p(e,bl,a),$(qt,e,a),p(e,El,a),p(e,oe,a),s(oe,qe),s(qe,oa),$(Ft,oa,null),s(oe,Mo),s(oe,ia),s(ia,C
o),p(e,xl,a),p(e,Fe,a),s(Fe,Do),s(Fe,Mt),s(Mt,zo),s(Fe,Po),p(e,Tl,a),p(e,ie,a),s(ie,Me),s(Me,pa),$(Ct,pa,null),s(ie,Lo),s(ie,fa),s(fa,Io),p(e,Al,a),p(e,Ce,a),s(Ce,So),s(Ce,ha),s(ha,Oo),s(Ce,No),p(e,ql,a),$(Dt,e,a),p(e,Fl,a),$(De,e,a),p(e,Ml,a),p(e,us,a),s(us,Ro),p(e,Cl,a),p(e,X,a),s(X,zt),s(zt,Bo),s(zt,cs),s(cs,Go),s(zt,Ho),s(X,Wo),s(X,Pt),s(Pt,Yo),s(Pt,ds),s(ds,Uo),s(Pt,Jo),s(X,Ko),s(X,Lt),s(Lt,Qo),s(Lt,gs),s(gs,Vo),s(Lt,Xo),p(e,Dl,a),$(It,e,a),p(e,zl,a),p(e,pe,a),s(pe,ze),s(ze,ma),$(St,ma,null),s(pe,Zo),s(pe,ua),s(ua,ei),p(e,Pl,a),p(e,_s,a),s(_s,ti),p(e,Ll,a),$(Pe,e,a),p(e,Il,a),p(e,H,a),s(H,si),s(H,ca),s(ca,ai),s(H,li),s(H,Ot),s(Ot,da),s(da,ni),s(H,ri),s(H,ga),s(ga,oi),s(H,ii),p(e,Sl,a),$(Nt,e,a),p(e,Ol,a),p(e,$s,a),s($s,pi),p(e,Nl,a),$(Rt,e,a),p(e,Rl,a),p(e,Le,a),s(Le,fi),s(Le,ws),s(ws,hi),s(Le,mi),p(e,Bl,a),$(Bt,e,a),p(e,Gl,a),p(e,Ie,a),s(Ie,ui),s(Ie,Gt),s(Gt,_a),s(_a,ci),s(Ie,di),p(e,Hl,a),$(Ht,e,a),p(e,Wl,a),p(e,Se,a),s(Se,gi),s(Se,Wt),s(Wt,$a),s($a,_i),s(Se,$i),p(e,Yl,a),$(Yt,e,a),p(e,Ul,a),$(Oe,e,a),Jl=!0},p(e,[a]){const Ut={};a&2&&(Ut.$$scope={dirty:a,ctx:e}),me.$set(Ut);const wa={};a&2&&(wa.$$scope={dirty:a,ctx:e}),ye.$set(wa);const ka={};a&2&&(ka.$$scope={dirty:a,ctx:e}),Ee.$set(ka);const ja={};a&2&&(ja.$$scope={dirty:a,ctx:e}),De.$set(ja);const va={};a&2&&(va.$$scope={dirty:a,ctx:e}),Pe.$set(va);const 
ya={};a&2&&(ya.$$scope={dirty:a,ctx:e}),Oe.$set(ya)},i(e){Jl||(w(d.$$.fragment,e),w(W.$$.fragment,e),w(Y.$$.fragment,e),w(me.$$.fragment,e),w(Je.$$.fragment,e),w(Ke.$$.fragment,e),w(Qe.$$.fragment,e),w(Ve.$$.fragment,e),w(Xe.$$.fragment,e),w(Ze.$$.fragment,e),w(et.$$.fragment,e),w(tt.$$.fragment,e),w(st.$$.fragment,e),w(lt.$$.fragment,e),w(nt.$$.fragment,e),w(ot.$$.fragment,e),w(pt.$$.fragment,e),w(ft.$$.fragment,e),w(ht.$$.fragment,e),w(mt.$$.fragment,e),w(ut.$$.fragment,e),w(dt.$$.fragment,e),w(gt.$$.fragment,e),w(ye.$$.fragment,e),w(kt.$$.fragment,e),w(jt.$$.fragment,e),w(Ee.$$.fragment,e),w(yt.$$.fragment,e),w(bt.$$.fragment,e),w(Et.$$.fragment,e),w(Tt.$$.fragment,e),w(qt.$$.fragment,e),w(Ft.$$.fragment,e),w(Ct.$$.fragment,e),w(Dt.$$.fragment,e),w(De.$$.fragment,e),w(It.$$.fragment,e),w(St.$$.fragment,e),w(Pe.$$.fragment,e),w(Nt.$$.fragment,e),w(Rt.$$.fragment,e),w(Bt.$$.fragment,e),w(Ht.$$.fragment,e),w(Yt.$$.fragment,e),w(Oe.$$.fragment,e),Jl=!0)},o(e){k(d.$$.fragment,e),k(W.$$.fragment,e),k(Y.$$.fragment,e),k(me.$$.fragment,e),k(Je.$$.fragment,e),k(Ke.$$.fragment,e),k(Qe.$$.fragment,e),k(Ve.$$.fragment,e),k(Xe.$$.fragment,e),k(Ze.$$.fragment,e),k(et.$$.fragment,e),k(tt.$$.fragment,e),k(st.$$.fragment,e),k(lt.$$.fragment,e),k(nt.$$.fragment,e),k(ot.$$.fragment,e),k(pt.$$.fragment,e),k(ft.$$.fragment,e),k(ht.$$.fragment,e),k(mt.$$.fragment,e),k(ut.$$.fragment,e),k(dt.$$.fragment,e),k(gt.$$.fragment,e),k(ye.$$.fragment,e),k(kt.$$.fragment,e),k(jt.$$.fragment,e),k(Ee.$$.fragment,e),k(yt.$$.fragment,e),k(bt.$$.fragment,e),k(Et.$$.fragment,e),k(Tt.$$.fragment,e),k(qt.$$.fragment,e),k(Ft.$$.fragment,e),k(Ct.$$.fragment,e),k(Dt.$$.fragment,e),k(De.$$.fragment,e),k(It.$$.fragment,e),k(St.$$.fragment,e),k(Pe.$$.fragment,e),k(Nt.$$.fragment,e),k(Rt.$$.fragment,e),k(Bt.$$.fragment,e),k(Ht.$$.fragment,e),k(Yt.$$.fragment,e),k(Oe.$$.fragment,e),Jl=!1},d(e){t(u),e&&t(E),e&&t(c),j(d),e&&t(y),e&&t(F),e&&t(L),j(W,e),e&&t(N),e&&t(U),e&&t(R),j(Y,e),e&&t(he),e&&t(M),e&&t(He),e&&t
(D),e&&t(ba),j(me,e),e&&t(Ea),e&&t(se),j(Je),e&&t(xa),e&&t(Vt),e&&t(Ta),j(Ke,e),e&&t(Aa),e&&t(Xt),e&&t(qa),j(Qe,e),e&&t(Fa),e&&t(Zt),e&&t(Ma),j(Ve,e),e&&t(Ca),e&&t(B),e&&t(Da),e&&t(ae),j(Xe),e&&t(za),j(Ze,e),e&&t(Pa),e&&t(de),e&&t(La),j(et,e),e&&t(Ia),j(tt,e),e&&t(Sa),e&&t(es),e&&t(Oa),j(st,e),e&&t(Na),e&&t(J),e&&t(Ra),j(lt,e),e&&t(Ba),e&&t(K),e&&t(Ga),e&&t(ts),e&&t(Ha),j(nt,e),e&&t(Wa),e&&t(S),e&&t(Ya),j(ot,e),e&&t(Ua),e&&t(ss),e&&t(Ja),e&&t(ge),e&&t(Ka),j(pt,e),e&&t(Qa),e&&t(_e),e&&t(Va),j(ft,e),e&&t(Xa),e&&t(O),e&&t(Za),e&&t($e),e&&t(el),j(ht,e),e&&t(tl),e&&t(Q),e&&t(sl),j(mt,e),e&&t(al),e&&t(le),j(ut),e&&t(ll),e&&t(ke),e&&t(nl),e&&t(ne),j(dt),e&&t(rl),e&&t(ve),e&&t(ol),j(gt,e),e&&t(il),j(ye,e),e&&t(pl),e&&t(rs),e&&t(fl),e&&t(V),e&&t(hl),j(kt,e),e&&t(ml),e&&t(re),j(jt),e&&t(ul),e&&t(fs),e&&t(cl),j(Ee,e),e&&t(dl),e&&t(G),e&&t(gl),j(yt,e),e&&t(_l),e&&t(hs),e&&t($l),j(bt,e),e&&t(wl),e&&t(xe),e&&t(kl),j(Et,e),e&&t(jl),e&&t(Te),e&&t(vl),j(Tt,e),e&&t(yl),e&&t(Ae),e&&t(bl),j(qt,e),e&&t(El),e&&t(oe),j(Ft),e&&t(xl),e&&t(Fe),e&&t(Tl),e&&t(ie),j(Ct),e&&t(Al),e&&t(Ce),e&&t(ql),j(Dt,e),e&&t(Fl),j(De,e),e&&t(Ml),e&&t(us),e&&t(Cl),e&&t(X),e&&t(Dl),j(It,e),e&&t(zl),e&&t(pe),j(St),e&&t(Pl),e&&t(_s),e&&t(Ll),j(Pe,e),e&&t(Il),e&&t(H),e&&t(Sl),j(Nt,e),e&&t(Ol),e&&t($s),e&&t(Nl),j(Rt,e),e&&t(Rl),e&&t(Le),e&&t(Bl),j(Bt,e),e&&t(Gl),e&&t(Ie),e&&t(Hl),j(Ht,e),e&&t(Wl),e&&t(Se),e&&t(Yl),j(Yt,e),e&&t(Ul),j(Oe,e)}}}const _f={local:"language-modeling",sections:[{local:"load-eli5-dataset",title:"Load ELI5 dataset"},{local:"preprocess",title:"Preprocess"},{local:"causal-language-modeling",sections:[{local:"finetune-with-trainer",title:"Fine-tune with Trainer"},{local:"finetune-with-tensorflow",title:"Fine-tune with TensorFlow"}],title:"Causal language modeling"},{local:"masked-language-modeling",sections:[{local:"finetune-with-trainer",title:"Fine-tune with Trainer"},{local:"finetune-with-tensorflow",title:"Fine-tune with TensorFlow"}],title:"Masked language modeling"}],title:"Language 
modeling"};function $f(P,u,E){let{fw:c}=u;return P.$$set=v=>{"fw"in v&&E(0,c=v.fw)},[c]}class xf extends nf{constructor(u){super();rf(this,u,$f,gf,of,{fw:0})}}export{xf as default,_f as metadata};
414
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/tasks/asr.mdx-b010f320.js
import{S as _n,i as gn,s as jn,e as o,k as c,w as j,t as e,M as $n,c as r,d as t,m as h,a as i,x as $,h as n,b as u,F as a,g as p,y as v,q as b,o as w,B as y}from"../../chunks/vendor-4833417e.js";import{T as Ee}from"../../chunks/Tip-fffd6df1.js";import{Y as vn}from"../../chunks/Youtube-27813aed.js";import{I as tt}from"../../chunks/IconCopyLink-4b81c553.js";import{C as P}from"../../chunks/CodeBlock-6a3d1b46.js";import"../../chunks/CopyButton-dacfbfaf.js";function bn(N){let f,x,d,_,E;return{c(){f=o("p"),x=e("See the automatic speech recognition "),d=o("a"),_=e("task page"),E=e(" for more information about its associated models, datasets, and metrics."),this.h()},l(m){f=r(m,"P",{});var k=i(f);x=n(k,"See the automatic speech recognition "),d=r(k,"A",{href:!0,rel:!0});var T=i(d);_=n(T,"task page"),T.forEach(t),E=n(k," for more information about its associated models, datasets, and metrics."),k.forEach(t),this.h()},h(){u(d,"href","https://huggingface.co/tasks/automatic-speech-recognition"),u(d,"rel","nofollow")},m(m,k){p(m,f,k),a(f,x),a(f,d),a(d,_),a(f,E)},d(m){m&&t(f)}}}function wn(N){let f,x,d,_,E,m,k,T;return{c(){f=o("p"),x=e("If you aren\u2019t familiar with fine-tuning a model with the "),d=o("a"),_=e("Trainer"),E=e(", take a look at the basic tutorial "),m=o("a"),k=e("here"),T=e("!"),this.h()},l(q){f=r(q,"P",{});var g=i(f);x=n(g,"If you aren\u2019t familiar with fine-tuning a model with the "),d=r(g,"A",{href:!0});var C=i(d);_=n(C,"Trainer"),C.forEach(t),E=n(g,", take a look at the basic tutorial "),m=r(g,"A",{href:!0});var I=i(m);k=n(I,"here"),I.forEach(t),T=n(g,"!"),g.forEach(t),this.h()},h(){u(d,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),u(m,"href","training#finetune-with-trainer")},m(q,g){p(q,f,g),a(f,x),a(f,d),a(d,_),a(f,E),a(f,m),a(m,k),a(f,T)},d(q){q&&t(f)}}}function yn(N){let f,x,d,_,E,m,k,T;return{c(){f=o("p"),x=e("For a more in-depth example of how to fine-tune a model for automatic speech recognition, take a look 
at this blog "),d=o("a"),_=e("post"),E=e(" for English ASR and this "),m=o("a"),k=e("post"),T=e(" for multilingual ASR."),this.h()},l(q){f=r(q,"P",{});var g=i(f);x=n(g,"For a more in-depth example of how to fine-tune a model for automatic speech recognition, take a look at this blog "),d=r(g,"A",{href:!0,rel:!0});var C=i(d);_=n(C,"post"),C.forEach(t),E=n(g," for English ASR and this "),m=r(g,"A",{href:!0,rel:!0});var I=i(m);k=n(I,"post"),I.forEach(t),T=n(g," for multilingual ASR."),g.forEach(t),this.h()},h(){u(d,"href","https://huggingface.co/blog/fine-tune-wav2vec2-english"),u(d,"rel","nofollow"),u(m,"href","https://huggingface.co/blog/fine-tune-xlsr-wav2vec2"),u(m,"rel","nofollow")},m(q,g){p(q,f,g),a(f,x),a(f,d),a(d,_),a(f,E),a(f,m),a(m,k),a(f,T)},d(q){q&&t(f)}}}function xn(N){let f,x,d,_,E,m,k,T,q,g,C,I,ys,et,ua,S,nt,ss,lt,ot,as,rt,it,da,H,ma,U,B,Ns,ts,pt,Us,ct,_a,xs,ht,ga,es,ja,ks,ft,$a,ns,va,A,ut,Vs,dt,mt,Ys,_t,gt,Hs,jt,$t,Bs,vt,bt,ba,ls,wa,Es,wt,ya,os,xa,L,yt,Js,xt,kt,Gs,Et,Tt,ka,V,J,Ks,rs,qt,Qs,At,Ea,Ts,Ct,Ta,is,qa,qs,Dt,Aa,M,ps,Pt,Xs,It,St,Lt,cs,Mt,Zs,Wt,Ot,Ft,sa,zt,Ca,hs,Da,W,Rt,fs,aa,Nt,Ut,ta,Vt,Yt,Pa,us,Ia,D,Ht,As,Bt,Jt,ea,Gt,Kt,na,Qt,Xt,Sa,O,Zt,la,se,ae,oa,te,ee,La,ds,Ma,G,ne,ra,le,oe,Wa,ms,Oa,Y,K,ia,_s,re,pa,ie,Fa,F,pe,Cs,ce,he,ca,fe,ue,za,gs,Ra,Q,Na,Ds,de,Ua,z,js,me,Ps,_e,ge,je,$s,$e,Is,ve,be,we,vs,ye,Ss,xe,ke,Va,bs,Ya,X,Ha;return m=new tt({}),C=new vn({props:{id:"TksaY_FDgnk"}}),H=new Ee({props:{$$slots:{default:[bn]},$$scope:{ctx:N}}}),ts=new tt({}),es=new P({props:{code:`from datasets import load_dataset timit = load_dataset("timit_asr")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>timit = load_dataset(<span class="hljs-string">&quot;timit_asr&quot;</span>)`}}),ns=new P({props:{code:"timit",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>timit DatasetDict({ train: Dataset({ features: 
[<span class="hljs-string">&#x27;file&#x27;</span>, <span class="hljs-string">&#x27;audio&#x27;</span>, <span class="hljs-string">&#x27;text&#x27;</span>, <span class="hljs-string">&#x27;phonetic_detail&#x27;</span>, <span class="hljs-string">&#x27;word_detail&#x27;</span>, <span class="hljs-string">&#x27;dialect_region&#x27;</span>, <span class="hljs-string">&#x27;sentence_type&#x27;</span>, <span class="hljs-string">&#x27;speaker_id&#x27;</span>, <span class="hljs-string">&#x27;id&#x27;</span>], num_rows: <span class="hljs-number">4620</span> }) test: Dataset({ features: [<span class="hljs-string">&#x27;file&#x27;</span>, <span class="hljs-string">&#x27;audio&#x27;</span>, <span class="hljs-string">&#x27;text&#x27;</span>, <span class="hljs-string">&#x27;phonetic_detail&#x27;</span>, <span class="hljs-string">&#x27;word_detail&#x27;</span>, <span class="hljs-string">&#x27;dialect_region&#x27;</span>, <span class="hljs-string">&#x27;sentence_type&#x27;</span>, <span class="hljs-string">&#x27;speaker_id&#x27;</span>, <span class="hljs-string">&#x27;id&#x27;</span>], num_rows: <span class="hljs-number">1680</span> }) })`}}),ls=new P({props:{code:`timit = timit.remove_columns( ["phonetic_detail", "word_detail", "dialect_region", "id", "sentence_type", "speaker_id"] )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>timit = timit.remove_columns( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;phonetic_detail&quot;</span>, <span class="hljs-string">&quot;word_detail&quot;</span>, <span class="hljs-string">&quot;dialect_region&quot;</span>, <span class="hljs-string">&quot;id&quot;</span>, <span class="hljs-string">&quot;sentence_type&quot;</span>, <span class="hljs-string">&quot;speaker_id&quot;</span>] <span class="hljs-meta">... 
</span>)`}}),os=new P({props:{code:'timit["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>timit[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([-<span class="hljs-number">2.1362305e-04</span>, <span class="hljs-number">6.1035156e-05</span>, <span class="hljs-number">3.0517578e-05</span>, ..., -<span class="hljs-number">3.0517578e-05</span>, -<span class="hljs-number">9.1552734e-05</span>, -<span class="hljs-number">6.1035156e-05</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/404950a46da14eac65eb4e2a8317b1372fb3971d980d91d5d5b221275b1fd7e0/data/TRAIN/DR4/MMDM0/SI681.WAV&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">16000</span>}, <span class="hljs-string">&#x27;file&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/404950a46da14eac65eb4e2a8317b1372fb3971d980d91d5d5b221275b1fd7e0/data/TRAIN/DR4/MMDM0/SI681.WAV&#x27;</span>, <span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;Would such an act of refusal be useful?&#x27;</span>}`}}),rs=new tt({}),is=new P({props:{code:`from transformers import AutoProcessor processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoProcessor <span class="hljs-meta">&gt;&gt;&gt; </span>processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>)`}}),hs=new P({props:{code:`def prepare_dataset(batch): audio = batch["audio"] batch["input_values"] = processor(audio["array"], 
sampling_rate=audio["sampling_rate"]).input_values[0] batch["input_length"] = len(batch["input_values"]) with processor.as_target_processor(): batch["labels"] = processor(batch["text"]).input_ids return batch`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">prepare_dataset</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> audio = batch[<span class="hljs-string">&quot;audio&quot;</span>] <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;input_values&quot;</span>] = processor(audio[<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=audio[<span class="hljs-string">&quot;sampling_rate&quot;</span>]).input_values[<span class="hljs-number">0</span>] <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;input_length&quot;</span>] = <span class="hljs-built_in">len</span>(batch[<span class="hljs-string">&quot;input_values&quot;</span>]) <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;labels&quot;</span>] = processor(batch[<span class="hljs-string">&quot;text&quot;</span>]).input_ids <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> batch`}}),us=new P({props:{code:'timit = timit.map(prepare_dataset, remove_columns=timit.column_names["train"], num_proc=4)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>timit = timit.<span class="hljs-built_in">map</span>(prepare_dataset, remove_columns=timit.column_names[<span class="hljs-string">&quot;train&quot;</span>], num_proc=<span class="hljs-number">4</span>)'}}),ds=new P({props:{code:`import torch from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union @dataclass class DataCollatorCTCWithPadding: processor: AutoProcessor padding: Union[bool, str] = True def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, return_tensors="pt", ) with self.processor.as_target_processor(): labels_batch = self.processor.pad( label_features, padding=self.padding, return_tensors="pt", ) # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch["labels"] = labels return batch`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> dataclasses <span class="hljs-keyword">import</span> dataclass, field <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">Any</span>, <span class="hljs-type">Dict</span>, <span class="hljs-type">List</span>, <span class="hljs-type">Optional</span>, <span 
class="hljs-type">Union</span> <span class="hljs-meta">&gt;&gt;&gt; </span>@dataclass <span class="hljs-meta">... </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DataCollatorCTCWithPadding</span>: <span class="hljs-meta">... </span> processor: AutoProcessor <span class="hljs-meta">... </span> padding: <span class="hljs-type">Union</span>[<span class="hljs-built_in">bool</span>, <span class="hljs-built_in">str</span>] = <span class="hljs-literal">True</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__call__</span>(<span class="hljs-params">self, features: <span class="hljs-type">List</span>[<span class="hljs-type">Dict</span>[<span class="hljs-built_in">str</span>, <span class="hljs-type">Union</span>[<span class="hljs-type">List</span>[<span class="hljs-built_in">int</span>], torch.Tensor]]]</span>) -&gt; <span class="hljs-type">Dict</span>[<span class="hljs-built_in">str</span>, torch.Tensor]: <span class="hljs-meta">... </span> <span class="hljs-comment"># split inputs and labels since they have to be of different lengths and need</span> <span class="hljs-meta">... </span> <span class="hljs-comment"># different padding methods</span> <span class="hljs-meta">... </span> input_features = [{<span class="hljs-string">&quot;input_values&quot;</span>: feature[<span class="hljs-string">&quot;input_values&quot;</span>]} <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... </span> label_features = [{<span class="hljs-string">&quot;input_ids&quot;</span>: feature[<span class="hljs-string">&quot;labels&quot;</span>]} <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... </span> batch = self.processor.pad( <span class="hljs-meta">... </span> input_features, <span class="hljs-meta">... </span> padding=self.padding, <span class="hljs-meta">... 
</span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> self.processor.as_target_processor(): <span class="hljs-meta">... </span> labels_batch = self.processor.pad( <span class="hljs-meta">... </span> label_features, <span class="hljs-meta">... </span> padding=self.padding, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> <span class="hljs-comment"># replace padding with -100 to ignore loss correctly</span> <span class="hljs-meta">... </span> labels = labels_batch[<span class="hljs-string">&quot;input_ids&quot;</span>].masked_fill(labels_batch.attention_mask.ne(<span class="hljs-number">1</span>), -<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;labels&quot;</span>] = labels <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch`}}),ms=new P({props:{code:"data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorCTCWithPadding(processor=processor, padding=<span class="hljs-literal">True</span>)'}}),_s=new tt({}),gs=new P({props:{code:`from transformers import AutoModelForCTC, TrainingArguments, Trainer model = AutoModelForCTC.from_pretrained( "facebook/wav2vec-base", ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCTC, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCTC.from_pretrained( <span class="hljs-meta">... 
</span> <span class="hljs-string">&quot;facebook/wav2vec-base&quot;</span>, <span class="hljs-meta">... </span> ctc_loss_reduction=<span class="hljs-string">&quot;mean&quot;</span>, <span class="hljs-meta">... </span> pad_token_id=processor.tokenizer.pad_token_id, <span class="hljs-meta">... </span>)`}}),Q=new Ee({props:{$$slots:{default:[wn]},$$scope:{ctx:N}}}),bs=new P({props:{code:`training_args = TrainingArguments( output_dir="./results", group_by_length=True, per_device_train_batch_size=16, evaluation_strategy="steps", num_train_epochs=3, fp16=True, gradient_checkpointing=True, learning_rate=1e-4, weight_decay=0.005, save_total_limit=2, ) trainer = Trainer( model=model, args=training_args, train_dataset=timit["train"], eval_dataset=timit["test"], tokenizer=processor.feature_extractor, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> group_by_length=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;steps&quot;</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> gradient_checkpointing=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">1e-4</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.005</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">2</span>, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=timit[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=timit[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=processor.feature_extractor, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),X=new Ee({props:{$$slots:{default:[yn]},$$scope:{ctx:N}}}),{c(){f=o("meta"),x=c(),d=o("h1"),_=o("a"),E=o("span"),j(m.$$.fragment),k=c(),T=o("span"),q=e("Automatic speech recognition"),g=c(),j(C.$$.fragment),I=c(),ys=o("p"),et=e("Automatic speech recognition (ASR) converts a speech signal to text. It is an example of a sequence-to-sequence task, going from a sequence of audio inputs to textual outputs. Voice assistants like Siri and Alexa utilize ASR models to assist users."),ua=c(),S=o("p"),nt=e("This guide will show you how to fine-tune "),ss=o("a"),lt=e("Wav2Vec2"),ot=e(" on the "),as=o("a"),rt=e("TIMIT"),it=e(" dataset to transcribe audio to text."),da=c(),j(H.$$.fragment),ma=c(),U=o("h2"),B=o("a"),Ns=o("span"),j(ts.$$.fragment),pt=c(),Us=o("span"),ct=e("Load TIMIT dataset"),_a=c(),xs=o("p"),ht=e("Load the TIMIT dataset from the \u{1F917} Datasets library:"),ga=c(),j(es.$$.fragment),ja=c(),ks=o("p"),ft=e("Then take a look at an example:"),$a=c(),j(ns.$$.fragment),va=c(),A=o("p"),ut=e("While the dataset contains a lot of helpful information, like "),Vs=o("code"),dt=e("dialect_region"),mt=e(" and "),Ys=o("code"),_t=e("sentence_type"),gt=e(", you will focus on the "),Hs=o("code"),jt=e("audio"),$t=e(" and "),Bs=o("code"),vt=e("text"),bt=e(" fields in this guide. 
Remove the other columns:"),ba=c(),j(ls.$$.fragment),wa=c(),Es=o("p"),wt=e("Take a look at the example again:"),ya=c(),j(os.$$.fragment),xa=c(),L=o("p"),yt=e("The "),Js=o("code"),xt=e("audio"),kt=e(" column contains a 1-dimensional "),Gs=o("code"),Et=e("array"),Tt=e(" of the speech signal that must be called to load and resample the audio file."),ka=c(),V=o("h2"),J=o("a"),Ks=o("span"),j(rs.$$.fragment),qt=c(),Qs=o("span"),At=e("Preprocess"),Ea=c(),Ts=o("p"),Ct=e("Load the Wav2Vec2 processor to process the audio signal and transcribed text:"),Ta=c(),j(is.$$.fragment),qa=c(),qs=o("p"),Dt=e("The preprocessing function needs to:"),Aa=c(),M=o("ol"),ps=o("li"),Pt=e("Call the "),Xs=o("code"),It=e("audio"),St=e(" column to load and resample the audio file."),Lt=c(),cs=o("li"),Mt=e("Extract the "),Zs=o("code"),Wt=e("input_values"),Ot=e(" from the audio file."),Ft=c(),sa=o("li"),zt=e("Typically, when you call the processor, you call the feature extractor. Since you also want to tokenize text, instruct the processor to call the tokenizer instead with a context manager."),Ca=c(),j(hs.$$.fragment),Da=c(),W=o("p"),Rt=e("Use \u{1F917} Datasets "),fs=o("a"),aa=o("code"),Nt=e("map"),Ut=e(" function to apply the preprocessing function over the entire dataset. You can speed up the map function by increasing the number of processes with "),ta=o("code"),Vt=e("num_proc"),Yt=e(". Remove the columns you don\u2019t need:"),Pa=c(),j(us.$$.fragment),Ia=c(),D=o("p"),Ht=e("\u{1F917} Transformers doesn\u2019t have a data collator for automatic speech recognition, so you will need to create one. You can adapt the "),As=o("a"),Bt=e("DataCollatorWithPadding"),Jt=e(" to create a batch of examples for automatic speech recognition. It will also dynamically pad your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the "),ea=o("code"),Gt=e("tokenizer"),Kt=e(" function by setting "),na=o("code"),Qt=e("padding=True"),Xt=e(", dynamic padding is more efficient."),Sa=c(),O=o("p"),Zt=e("Unlike other data collators, this specific data collator needs to apply a different padding method to "),la=o("code"),se=e("input_values"),ae=e(" and "),oa=o("code"),te=e("labels"),ee=e(". You can apply a different padding method with a context manager:"),La=c(),j(ds.$$.fragment),Ma=c(),G=o("p"),ne=e("Create a batch of examples and dynamically pad them with "),ra=o("code"),le=e("DataCollatorForCTCWithPadding"),oe=e(":"),Wa=c(),j(ms.$$.fragment),Oa=c(),Y=o("h2"),K=o("a"),ia=o("span"),j(_s.$$.fragment),re=c(),pa=o("span"),ie=e("Fine-tune with Trainer"),Fa=c(),F=o("p"),pe=e("Load Wav2Vec2 with "),Cs=o("a"),ce=e("AutoModelForCTC"),he=e(". For "),ca=o("code"),fe=e("ctc_loss_reduction"),ue=e(", it is often better to use the average instead of the default summation:"),za=c(),j(gs.$$.fragment),Ra=c(),j(Q.$$.fragment),Na=c(),Ds=o("p"),de=e("At this point, only three steps remain:"),Ua=c(),z=o("ol"),js=o("li"),me=e("Define your training hyperparameters in "),Ps=o("a"),_e=e("TrainingArguments"),ge=e("."),je=c(),$s=o("li"),$e=e("Pass the training arguments to "),Is=o("a"),ve=e("Trainer"),be=e(" along with the model, datasets, tokenizer, and data collator."),we=c(),vs=o("li"),ye=e("Call "),Ss=o("a"),xe=e("train()"),ke=e(" to fine-tune your model."),Va=c(),j(bs.$$.fragment),Ya=c(),j(X.$$.fragment),this.h()},l(s){const l=$n('[data-svelte="svelte-1phssyn"]',document.head);f=r(l,"META",{name:!0,content:!0}),l.forEach(t),x=h(s),d=r(s,"H1",{class:!0});var ws=i(d);_=r(ws,"A",{id:!0,class:!0,href:!0});var ha=i(_);E=r(ha,"SPAN",{});var fa=i(E);$(m.$$.fragment,fa),fa.forEach(t),ha.forEach(t),k=h(ws),T=r(ws,"SPAN",{});var Te=i(T);q=n(Te,"Automatic speech recognition"),Te.forEach(t),ws.forEach(t),g=h(s),$(C.$$.fragment,s),I=h(s),ys=r(s,"P",{});var qe=i(ys);et=n(qe,"Automatic speech 
recognition (ASR) converts a speech signal to text. It is an example of a sequence-to-sequence task, going from a sequence of audio inputs to textual outputs. Voice assistants like Siri and Alexa utilize ASR models to assist users."),qe.forEach(t),ua=h(s),S=r(s,"P",{});var Ls=i(S);nt=n(Ls,"This guide will show you how to fine-tune "),ss=r(Ls,"A",{href:!0,rel:!0});var Ae=i(ss);lt=n(Ae,"Wav2Vec2"),Ae.forEach(t),ot=n(Ls," on the "),as=r(Ls,"A",{href:!0,rel:!0});var Ce=i(as);rt=n(Ce,"TIMIT"),Ce.forEach(t),it=n(Ls," dataset to transcribe audio to text."),Ls.forEach(t),da=h(s),$(H.$$.fragment,s),ma=h(s),U=r(s,"H2",{class:!0});var Ba=i(U);B=r(Ba,"A",{id:!0,class:!0,href:!0});var De=i(B);Ns=r(De,"SPAN",{});var Pe=i(Ns);$(ts.$$.fragment,Pe),Pe.forEach(t),De.forEach(t),pt=h(Ba),Us=r(Ba,"SPAN",{});var Ie=i(Us);ct=n(Ie,"Load TIMIT dataset"),Ie.forEach(t),Ba.forEach(t),_a=h(s),xs=r(s,"P",{});var Se=i(xs);ht=n(Se,"Load the TIMIT dataset from the \u{1F917} Datasets library:"),Se.forEach(t),ga=h(s),$(es.$$.fragment,s),ja=h(s),ks=r(s,"P",{});var Le=i(ks);ft=n(Le,"Then take a look at an example:"),Le.forEach(t),$a=h(s),$(ns.$$.fragment,s),va=h(s),A=r(s,"P",{});var R=i(A);ut=n(R,"While the dataset contains a lot of helpful information, like "),Vs=r(R,"CODE",{});var Me=i(Vs);dt=n(Me,"dialect_region"),Me.forEach(t),mt=n(R," and "),Ys=r(R,"CODE",{});var We=i(Ys);_t=n(We,"sentence_type"),We.forEach(t),gt=n(R,", you will focus on the "),Hs=r(R,"CODE",{});var Oe=i(Hs);jt=n(Oe,"audio"),Oe.forEach(t),$t=n(R," and "),Bs=r(R,"CODE",{});var Fe=i(Bs);vt=n(Fe,"text"),Fe.forEach(t),bt=n(R," fields in this guide. 
Remove the other columns:"),R.forEach(t),ba=h(s),$(ls.$$.fragment,s),wa=h(s),Es=r(s,"P",{});var ze=i(Es);wt=n(ze,"Take a look at the example again:"),ze.forEach(t),ya=h(s),$(os.$$.fragment,s),xa=h(s),L=r(s,"P",{});var Ms=i(L);yt=n(Ms,"The "),Js=r(Ms,"CODE",{});var Re=i(Js);xt=n(Re,"audio"),Re.forEach(t),kt=n(Ms," column contains a 1-dimensional "),Gs=r(Ms,"CODE",{});var Ne=i(Gs);Et=n(Ne,"array"),Ne.forEach(t),Tt=n(Ms," of the speech signal that must be called to load and resample the audio file."),Ms.forEach(t),ka=h(s),V=r(s,"H2",{class:!0});var Ja=i(V);J=r(Ja,"A",{id:!0,class:!0,href:!0});var Ue=i(J);Ks=r(Ue,"SPAN",{});var Ve=i(Ks);$(rs.$$.fragment,Ve),Ve.forEach(t),Ue.forEach(t),qt=h(Ja),Qs=r(Ja,"SPAN",{});var Ye=i(Qs);At=n(Ye,"Preprocess"),Ye.forEach(t),Ja.forEach(t),Ea=h(s),Ts=r(s,"P",{});var He=i(Ts);Ct=n(He,"Load the Wav2Vec2 processor to process the audio signal and transcribed text:"),He.forEach(t),Ta=h(s),$(is.$$.fragment,s),qa=h(s),qs=r(s,"P",{});var Be=i(qs);Dt=n(Be,"The preprocessing function needs to:"),Be.forEach(t),Aa=h(s),M=r(s,"OL",{});var Ws=i(M);ps=r(Ws,"LI",{});var Ga=i(ps);Pt=n(Ga,"Call the "),Xs=r(Ga,"CODE",{});var Je=i(Xs);It=n(Je,"audio"),Je.forEach(t),St=n(Ga," column to load and resample the audio file."),Ga.forEach(t),Lt=h(Ws),cs=r(Ws,"LI",{});var Ka=i(cs);Mt=n(Ka,"Extract the "),Zs=r(Ka,"CODE",{});var Ge=i(Zs);Wt=n(Ge,"input_values"),Ge.forEach(t),Ot=n(Ka," from the audio file."),Ka.forEach(t),Ft=h(Ws),sa=r(Ws,"LI",{});var Ke=i(sa);zt=n(Ke,"Typically, when you call the processor, you call the feature extractor. 
Since you also want to tokenize text, instruct the processor to call the tokenizer instead with a context manager."),Ke.forEach(t),Ws.forEach(t),Ca=h(s),$(hs.$$.fragment,s),Da=h(s),W=r(s,"P",{});var Os=i(W);Rt=n(Os,"Use \u{1F917} Datasets "),fs=r(Os,"A",{href:!0,rel:!0});var Qe=i(fs);aa=r(Qe,"CODE",{});var Xe=i(aa);Nt=n(Xe,"map"),Xe.forEach(t),Qe.forEach(t),Ut=n(Os," function to apply the preprocessing function over the entire dataset. You can speed up the map function by increasing the number of processes with "),ta=r(Os,"CODE",{});var Ze=i(ta);Vt=n(Ze,"num_proc"),Ze.forEach(t),Yt=n(Os,". Remove the columns you don\u2019t need:"),Os.forEach(t),Pa=h(s),$(us.$$.fragment,s),Ia=h(s),D=r(s,"P",{});var Z=i(D);Ht=n(Z,"\u{1F917} Transformers doesn\u2019t have a data collator for automatic speech recognition, so you will need to create one. You can adapt the "),As=r(Z,"A",{href:!0});var sn=i(As);Bt=n(sn,"DataCollatorWithPadding"),sn.forEach(t),Jt=n(Z," to create a batch of examples for automatic speech recognition. It will also dynamically pad your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),ea=r(Z,"CODE",{});var an=i(ea);Gt=n(an,"tokenizer"),an.forEach(t),Kt=n(Z," function by setting "),na=r(Z,"CODE",{});var tn=i(na);Qt=n(tn,"padding=True"),tn.forEach(t),Xt=n(Z,", dynamic padding is more efficient."),Z.forEach(t),Sa=h(s),O=r(s,"P",{});var Fs=i(O);Zt=n(Fs,"Unlike other data collators, this specific data collator needs to apply a different padding method to "),la=r(Fs,"CODE",{});var en=i(la);se=n(en,"input_values"),en.forEach(t),ae=n(Fs," and "),oa=r(Fs,"CODE",{});var nn=i(oa);te=n(nn,"labels"),nn.forEach(t),ee=n(Fs,". 
You can apply a different padding method with a context manager:"),Fs.forEach(t),La=h(s),$(ds.$$.fragment,s),Ma=h(s),G=r(s,"P",{});var Qa=i(G);ne=n(Qa,"Create a batch of examples and dynamically pad them with "),ra=r(Qa,"CODE",{});var ln=i(ra);le=n(ln,"DataCollatorForCTCWithPadding"),ln.forEach(t),oe=n(Qa,":"),Qa.forEach(t),Wa=h(s),$(ms.$$.fragment,s),Oa=h(s),Y=r(s,"H2",{class:!0});var Xa=i(Y);K=r(Xa,"A",{id:!0,class:!0,href:!0});var on=i(K);ia=r(on,"SPAN",{});var rn=i(ia);$(_s.$$.fragment,rn),rn.forEach(t),on.forEach(t),re=h(Xa),pa=r(Xa,"SPAN",{});var pn=i(pa);ie=n(pn,"Fine-tune with Trainer"),pn.forEach(t),Xa.forEach(t),Fa=h(s),F=r(s,"P",{});var zs=i(F);pe=n(zs,"Load Wav2Vec2 with "),Cs=r(zs,"A",{href:!0});var cn=i(Cs);ce=n(cn,"AutoModelForCTC"),cn.forEach(t),he=n(zs,". For "),ca=r(zs,"CODE",{});var hn=i(ca);fe=n(hn,"ctc_loss_reduction"),hn.forEach(t),ue=n(zs,", it is often better to use the average instead of the default summation:"),zs.forEach(t),za=h(s),$(gs.$$.fragment,s),Ra=h(s),$(Q.$$.fragment,s),Na=h(s),Ds=r(s,"P",{});var fn=i(Ds);de=n(fn,"At this point, only three steps remain:"),fn.forEach(t),Ua=h(s),z=r(s,"OL",{});var Rs=i(z);js=r(Rs,"LI",{});var Za=i(js);me=n(Za,"Define your training hyperparameters in "),Ps=r(Za,"A",{href:!0});var un=i(Ps);_e=n(un,"TrainingArguments"),un.forEach(t),ge=n(Za,"."),Za.forEach(t),je=h(Rs),$s=r(Rs,"LI",{});var st=i($s);$e=n(st,"Pass the training arguments to "),Is=r(st,"A",{href:!0});var dn=i(Is);ve=n(dn,"Trainer"),dn.forEach(t),be=n(st," along with the model, datasets, tokenizer, and data collator."),st.forEach(t),we=h(Rs),vs=r(Rs,"LI",{});var at=i(vs);ye=n(at,"Call "),Ss=r(at,"A",{href:!0});var mn=i(Ss);xe=n(mn,"train()"),mn.forEach(t),ke=n(at," to fine-tune your model."),at.forEach(t),Rs.forEach(t),Va=h(s),$(bs.$$.fragment,s),Ya=h(s),$(X.$$.fragment,s),this.h()},h(){u(f,"name","hf:doc:metadata"),u(f,"content",JSON.stringify(kn)),u(_,"id","automatic-speech-recognition"),u(_,"class","header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(_,"href","#automatic-speech-recognition"),u(d,"class","relative group"),u(ss,"href","https://huggingface.co/facebook/wav2vec2-base"),u(ss,"rel","nofollow"),u(as,"href","https://huggingface.co/datasets/timit_asr"),u(as,"rel","nofollow"),u(B,"id","load-timit-dataset"),u(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(B,"href","#load-timit-dataset"),u(U,"class","relative group"),u(J,"id","preprocess"),u(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(J,"href","#preprocess"),u(V,"class","relative group"),u(fs,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map"),u(fs,"rel","nofollow"),u(As,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorWithPadding"),u(K,"id","finetune-with-trainer"),u(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(K,"href","#finetune-with-trainer"),u(Y,"class","relative 
group"),u(Cs,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForCTC"),u(Ps,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments"),u(Is,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),u(Ss,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train")},m(s,l){a(document.head,f),p(s,x,l),p(s,d,l),a(d,_),a(_,E),v(m,E,null),a(d,k),a(d,T),a(T,q),p(s,g,l),v(C,s,l),p(s,I,l),p(s,ys,l),a(ys,et),p(s,ua,l),p(s,S,l),a(S,nt),a(S,ss),a(ss,lt),a(S,ot),a(S,as),a(as,rt),a(S,it),p(s,da,l),v(H,s,l),p(s,ma,l),p(s,U,l),a(U,B),a(B,Ns),v(ts,Ns,null),a(U,pt),a(U,Us),a(Us,ct),p(s,_a,l),p(s,xs,l),a(xs,ht),p(s,ga,l),v(es,s,l),p(s,ja,l),p(s,ks,l),a(ks,ft),p(s,$a,l),v(ns,s,l),p(s,va,l),p(s,A,l),a(A,ut),a(A,Vs),a(Vs,dt),a(A,mt),a(A,Ys),a(Ys,_t),a(A,gt),a(A,Hs),a(Hs,jt),a(A,$t),a(A,Bs),a(Bs,vt),a(A,bt),p(s,ba,l),v(ls,s,l),p(s,wa,l),p(s,Es,l),a(Es,wt),p(s,ya,l),v(os,s,l),p(s,xa,l),p(s,L,l),a(L,yt),a(L,Js),a(Js,xt),a(L,kt),a(L,Gs),a(Gs,Et),a(L,Tt),p(s,ka,l),p(s,V,l),a(V,J),a(J,Ks),v(rs,Ks,null),a(V,qt),a(V,Qs),a(Qs,At),p(s,Ea,l),p(s,Ts,l),a(Ts,Ct),p(s,Ta,l),v(is,s,l),p(s,qa,l),p(s,qs,l),a(qs,Dt),p(s,Aa,l),p(s,M,l),a(M,ps),a(ps,Pt),a(ps,Xs),a(Xs,It),a(ps,St),a(M,Lt),a(M,cs),a(cs,Mt),a(cs,Zs),a(Zs,Wt),a(cs,Ot),a(M,Ft),a(M,sa),a(sa,zt),p(s,Ca,l),v(hs,s,l),p(s,Da,l),p(s,W,l),a(W,Rt),a(W,fs),a(fs,aa),a(aa,Nt),a(W,Ut),a(W,ta),a(ta,Vt),a(W,Yt),p(s,Pa,l),v(us,s,l),p(s,Ia,l),p(s,D,l),a(D,Ht),a(D,As),a(As,Bt),a(D,Jt),a(D,ea),a(ea,Gt),a(D,Kt),a(D,na),a(na,Qt),a(D,Xt),p(s,Sa,l),p(s,O,l),a(O,Zt),a(O,la),a(la,se),a(O,ae),a(O,oa),a(oa,te),a(O,ee),p(s,La,l),v(ds,s,l),p(s,Ma,l),p(s,G,l),a(G,ne),a(G,ra),a(ra,le),a(G,oe),p(s,Wa,l),v(ms,s,l),p(s,Oa,l),p(s,Y,l),a(Y,K),a(K,ia),v(_s,ia,null),a(Y,re),a(Y,pa),a(pa,ie),p(s,Fa,l),p(s,F,l),a(F,pe),a(F,Cs),a(Cs,ce),a(F,he),a(F,ca),a(ca,fe),a(F,ue),p(s,za,l),v(gs,s,l),p(s,Ra,l),v(Q,s,l),p(s,Na,l),p(s,Ds,l),a(Ds,de),p(s,Ua,l),p(s,z,l),a(z,js),a(js,me),a(
js,Ps),a(Ps,_e),a(js,ge),a(z,je),a(z,$s),a($s,$e),a($s,Is),a(Is,ve),a($s,be),a(z,we),a(z,vs),a(vs,ye),a(vs,Ss),a(Ss,xe),a(vs,ke),p(s,Va,l),v(bs,s,l),p(s,Ya,l),v(X,s,l),Ha=!0},p(s,[l]){const ws={};l&2&&(ws.$$scope={dirty:l,ctx:s}),H.$set(ws);const ha={};l&2&&(ha.$$scope={dirty:l,ctx:s}),Q.$set(ha);const fa={};l&2&&(fa.$$scope={dirty:l,ctx:s}),X.$set(fa)},i(s){Ha||(b(m.$$.fragment,s),b(C.$$.fragment,s),b(H.$$.fragment,s),b(ts.$$.fragment,s),b(es.$$.fragment,s),b(ns.$$.fragment,s),b(ls.$$.fragment,s),b(os.$$.fragment,s),b(rs.$$.fragment,s),b(is.$$.fragment,s),b(hs.$$.fragment,s),b(us.$$.fragment,s),b(ds.$$.fragment,s),b(ms.$$.fragment,s),b(_s.$$.fragment,s),b(gs.$$.fragment,s),b(Q.$$.fragment,s),b(bs.$$.fragment,s),b(X.$$.fragment,s),Ha=!0)},o(s){w(m.$$.fragment,s),w(C.$$.fragment,s),w(H.$$.fragment,s),w(ts.$$.fragment,s),w(es.$$.fragment,s),w(ns.$$.fragment,s),w(ls.$$.fragment,s),w(os.$$.fragment,s),w(rs.$$.fragment,s),w(is.$$.fragment,s),w(hs.$$.fragment,s),w(us.$$.fragment,s),w(ds.$$.fragment,s),w(ms.$$.fragment,s),w(_s.$$.fragment,s),w(gs.$$.fragment,s),w(Q.$$.fragment,s),w(bs.$$.fragment,s),w(X.$$.fragment,s),Ha=!1},d(s){t(f),s&&t(x),s&&t(d),y(m),s&&t(g),y(C,s),s&&t(I),s&&t(ys),s&&t(ua),s&&t(S),s&&t(da),y(H,s),s&&t(ma),s&&t(U),y(ts),s&&t(_a),s&&t(xs),s&&t(ga),y(es,s),s&&t(ja),s&&t(ks),s&&t($a),y(ns,s),s&&t(va),s&&t(A),s&&t(ba),y(ls,s),s&&t(wa),s&&t(Es),s&&t(ya),y(os,s),s&&t(xa),s&&t(L),s&&t(ka),s&&t(V),y(rs),s&&t(Ea),s&&t(Ts),s&&t(Ta),y(is,s),s&&t(qa),s&&t(qs),s&&t(Aa),s&&t(M),s&&t(Ca),y(hs,s),s&&t(Da),s&&t(W),s&&t(Pa),y(us,s),s&&t(Ia),s&&t(D),s&&t(Sa),s&&t(O),s&&t(La),y(ds,s),s&&t(Ma),s&&t(G),s&&t(Wa),y(ms,s),s&&t(Oa),s&&t(Y),y(_s),s&&t(Fa),s&&t(F),s&&t(za),y(gs,s),s&&t(Ra),y(Q,s),s&&t(Na),s&&t(Ds),s&&t(Ua),s&&t(z),s&&t(Va),y(bs,s),s&&t(Ya),y(X,s)}}}const kn={local:"automatic-speech-recognition",sections:[{local:"load-timit-dataset",title:"Load TIMIT dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-trainer",title:"Fine-tune with 
Trainer"}],title:"Automatic speech recognition"};function En(N,f,x){let{fw:d}=f;return N.$$set=_=>{"fw"in _&&x(0,d=_.fw)},[d]}class In extends _n{constructor(f){super();gn(this,f,En,xn,jn,{fw:0})}}export{In as default,kn as metadata};
415
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/tasks/translation.mdx-9674fec0.js
import{S as Ro,i as Xo,s as Go,e as r,k as f,w as $,t as o,M as Qo,c as l,d as t,m as h,a as i,x as k,h as n,b as u,F as s,g as p,y as w,q as v,o as b,B as q}from"../../chunks/vendor-4833417e.js";import{T as Ts}from"../../chunks/Tip-fffd6df1.js";import{Y as Ko}from"../../chunks/Youtube-27813aed.js";import{I as yt}from"../../chunks/IconCopyLink-4b81c553.js";import{C as A}from"../../chunks/CodeBlock-6a3d1b46.js";import{C as Vo}from"../../chunks/CodeBlockFw-27a176a0.js";import"../../chunks/CopyButton-dacfbfaf.js";function en(C){let m,j,c,_,E;return{c(){m=r("p"),j=o("See the translation "),c=r("a"),_=o("task page"),E=o(" for more information about its associated models, datasets, and metrics."),this.h()},l(d){m=l(d,"P",{});var g=i(m);j=n(g,"See the translation "),c=l(g,"A",{href:!0,rel:!0});var S=i(c);_=n(S,"task page"),S.forEach(t),E=n(g," for more information about its associated models, datasets, and metrics."),g.forEach(t),this.h()},h(){u(c,"href","https://huggingface.co/tasks/translation"),u(c,"rel","nofollow")},m(d,g){p(d,m,g),s(m,j),s(m,c),s(c,_),s(m,E)},d(d){d&&t(m)}}}function tn(C){let m,j,c,_,E,d,g,S;return{c(){m=r("p"),j=o("If you aren\u2019t familiar with fine-tuning a model with the "),c=r("a"),_=o("Trainer"),E=o(", take a look at the basic tutorial "),d=r("a"),g=o("here"),S=o("!"),this.h()},l(T){m=l(T,"P",{});var y=i(m);j=n(y,"If you aren\u2019t familiar with fine-tuning a model with the "),c=l(y,"A",{href:!0});var z=i(c);_=n(z,"Trainer"),z.forEach(t),E=n(y,", take a look at the basic tutorial "),d=l(y,"A",{href:!0});var D=i(d);g=n(D,"here"),D.forEach(t),S=n(y,"!"),y.forEach(t),this.h()},h(){u(c,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),u(d,"href","training#finetune-with-trainer")},m(T,y){p(T,m,y),s(m,j),s(m,c),s(c,_),s(m,E),s(m,d),s(d,g),s(m,S)},d(T){T&&t(m)}}}function sn(C){let m,j,c,_,E;return{c(){m=r("p"),j=o("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial 
"),c=r("a"),_=o("here"),E=o("!"),this.h()},l(d){m=l(d,"P",{});var g=i(m);j=n(g,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),c=l(g,"A",{href:!0});var S=i(c);_=n(S,"here"),S.forEach(t),E=n(g,"!"),g.forEach(t),this.h()},h(){u(c,"href","training#finetune-with-keras")},m(d,g){p(d,m,g),s(m,j),s(m,c),s(c,_),s(m,E)},d(d){d&&t(m)}}}function an(C){let m,j,c,_,E,d,g,S;return{c(){m=r("p"),j=o(`For a more in-depth example of how to fine-tune a model for translation, take a look at the corresponding `),c=r("a"),_=o("PyTorch notebook"),E=o(` or `),d=r("a"),g=o("TensorFlow notebook"),S=o("."),this.h()},l(T){m=l(T,"P",{});var y=i(m);j=n(y,`For a more in-depth example of how to fine-tune a model for translation, take a look at the corresponding `),c=l(y,"A",{href:!0,rel:!0});var z=i(c);_=n(z,"PyTorch notebook"),z.forEach(t),E=n(y,` or `),d=l(y,"A",{href:!0,rel:!0});var D=i(d);g=n(D,"TensorFlow notebook"),D.forEach(t),S=n(y,"."),y.forEach(t),this.h()},h(){u(c,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/translation.ipynb"),u(c,"rel","nofollow"),u(d,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/translation-tf.ipynb"),u(d,"rel","nofollow")},m(T,y){p(T,m,y),s(m,j),s(m,c),s(c,_),s(m,E),s(m,d),s(d,g),s(m,S)},d(T){T&&t(m)}}}function on(C){let 
m,j,c,_,E,d,g,S,T,y,z,D,De,xs,Et,L,zs,ne,As,Fs,re,Ps,Cs,St,Y,Tt,N,H,Ve,le,Ds,et,Ls,xt,Le,Ms,zt,ie,At,Me,Os,Ft,pe,Pt,Oe,Is,Ct,fe,Dt,Z,Ns,tt,Us,Bs,Lt,U,J,st,he,Ws,at,Ys,Mt,me,Ot,Ie,Hs,It,ue,Nt,Ne,Zs,Ut,M,ot,Js,Ks,nt,Rs,Xs,ce,Gs,rt,Qs,Vs,Bt,de,Wt,F,ea,_e,lt,ta,sa,it,aa,oa,pt,na,ra,Yt,ge,Ht,x,la,Ue,ia,pa,ft,fa,ha,ht,ma,ua,mt,ca,da,Zt,$e,Jt,B,K,ut,ke,_a,ct,ga,Kt,R,$a,Be,ka,wa,Rt,we,Xt,X,Gt,We,va,Qt,O,ve,ba,Ye,qa,ja,ya,be,Ea,He,Sa,Ta,xa,qe,za,Ze,Aa,Fa,Vt,je,es,W,G,dt,ye,Pa,_t,Ca,ts,Je,Da,ss,Q,as,P,La,gt,Ma,Oa,Ee,$t,Ia,Na,kt,Ua,Ba,os,Se,ns,Ke,Wa,rs,Te,ls,V,Ya,Re,Ha,Za,is,xe,ps,ee,Ja,ze,wt,Ka,Ra,fs,Ae,hs,te,Xa,Fe,vt,Ga,Qa,ms,Pe,us,se,cs;return d=new yt({}),z=new Ko({props:{id:"1JvfrvZgi6c"}}),Y=new Ts({props:{$$slots:{default:[en]},$$scope:{ctx:C}}}),le=new yt({}),ie=new A({props:{code:`from datasets import load_dataset books = load_dataset("opus_books", "en-fr")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>books = load_dataset(<span class="hljs-string">&quot;opus_books&quot;</span>, <span class="hljs-string">&quot;en-fr&quot;</span>)`}}),pe=new A({props:{code:'books = books["train"].train_test_split(test_size=0.2)',highlighted:'books = books[<span class="hljs-string">&quot;train&quot;</span>].train_test_split(test_size=<span class="hljs-number">0.2</span>)'}}),fe=new A({props:{code:'books["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>books[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;90560&#x27;</span>, <span class="hljs-string">&#x27;translation&#x27;</span>: {<span class="hljs-string">&#x27;en&#x27;</span>: <span class="hljs-string">&#x27;But this lofty plateau measured only a few fathoms, and soon we reentered Our Element.&#x27;</span>, <span 
class="hljs-string">&#x27;fr&#x27;</span>: <span class="hljs-string">&#x27;Mais ce plateau \xE9lev\xE9 ne mesurait que quelques toises, et bient\xF4t nous f\xFBmes rentr\xE9s dans notre \xE9l\xE9ment.&#x27;</span>}}`}}),he=new yt({}),me=new Ko({props:{id:"XAR8jnZZuUs"}}),ue=new A({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),de=new A({props:{code:`source_lang = "en" target_lang = "fr" prefix = "translate English to French: " def preprocess_function(examples): inputs = [prefix + example[source_lang] for example in examples["translation"]] targets = [example[target_lang] for example in examples["translation"]] model_inputs = tokenizer(inputs, max_length=128, truncation=True) with tokenizer.as_target_tokenizer(): labels = tokenizer(targets, max_length=128, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>source_lang = <span class="hljs-string">&quot;en&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_lang = <span class="hljs-string">&quot;fr&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>prefix = <span class="hljs-string">&quot;translate English to French: &quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... 
</span> inputs = [prefix + example[source_lang] <span class="hljs-keyword">for</span> example <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;translation&quot;</span>]] <span class="hljs-meta">... </span> targets = [example[target_lang] <span class="hljs-keyword">for</span> example <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;translation&quot;</span>]] <span class="hljs-meta">... </span> model_inputs = tokenizer(inputs, max_length=<span class="hljs-number">128</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> labels = tokenizer(targets, max_length=<span class="hljs-number">128</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> model_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> model_inputs`}}),ge=new A({props:{code:"tokenized_books = books.map(preprocess_function, batched=True)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_books = books.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)'}}),$e=new Vo({props:{group1:{id:"pt",code:`from transformers import DataCollatorForSeq2Seq data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)`},group2:{id:"tf",code:`from transformers import DataCollatorForSeq2Seq data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}}),ke=new yt({}),we=new A({props:{code:`from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),X=new Ts({props:{$$slots:{default:[tn]},$$scope:{ctx:C}}}),je=new A({props:{code:`training_args = Seq2SeqTrainingArguments( 
output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, weight_decay=0.01, save_total_limit=3, num_train_epochs=1, fp16=True, ) trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=tokenized_books["train"], eval_dataset=tokenized_books["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = Seq2SeqTrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Seq2SeqTrainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_books[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_books[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),ye=new yt({}),Q=new Ts({props:{$$slots:{default:[sn]},$$scope:{ctx:C}}}),Se=new A({props:{code:`tf_train_set = tokenized_books["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_test_set = tokenized_books["test"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_books[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = tokenized_books[<span class="hljs-string">&quot;test&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)`}}),Te=new A({props:{code:`from transformers import create_optimizer, AdamWeightDecay optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)`}}),xe=new A({props:{code:`from transformers import TFAutoModelForSeq2SeqLM model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),Ae=new A({props:{code:"model.compile(optimizer=optimizer)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)'}}),Pe=new A({props:{code:"model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)'}}),se=new Ts({props:{$$slots:{default:[an]},$$scope:{ctx:C}}}),{c(){m=r("meta"),j=f(),c=r("h1"),_=r("a"),E=r("span"),$(d.$$.fragment),g=f(),S=r("span"),T=o("Translation"),y=f(),$(z.$$.fragment),D=f(),De=r("p"),xs=o("Translation converts a sequence of text from one language to another. 
It is one of several tasks you can formulate as a sequence-to-sequence problem, a powerful framework that extends to vision and audio tasks."),Et=f(),L=r("p"),zs=o("This guide will show you how to fine-tune "),ne=r("a"),As=o("T5"),Fs=o(" on the English-French subset of the "),re=r("a"),Ps=o("OPUS Books"),Cs=o(" dataset to translate English text to French."),St=f(),$(Y.$$.fragment),Tt=f(),N=r("h2"),H=r("a"),Ve=r("span"),$(le.$$.fragment),Ds=f(),et=r("span"),Ls=o("Load OPUS Books dataset"),xt=f(),Le=r("p"),Ms=o("Load the OPUS Books dataset from the \u{1F917} Datasets library:"),zt=f(),$(ie.$$.fragment),At=f(),Me=r("p"),Os=o("Split this dataset into a train and test set:"),Ft=f(),$(pe.$$.fragment),Pt=f(),Oe=r("p"),Is=o("Then take a look at an example:"),Ct=f(),$(fe.$$.fragment),Dt=f(),Z=r("p"),Ns=o("The "),tt=r("code"),Us=o("translation"),Bs=o(" field is a dictionary containing the English and French translations of the text."),Lt=f(),U=r("h2"),J=r("a"),st=r("span"),$(he.$$.fragment),Ws=f(),at=r("span"),Ys=o("Preprocess"),Mt=f(),$(me.$$.fragment),Ot=f(),Ie=r("p"),Hs=o("Load the T5 tokenizer to process the language pairs:"),It=f(),$(ue.$$.fragment),Nt=f(),Ne=r("p"),Zs=o("The preprocessing function needs to:"),Ut=f(),M=r("ol"),ot=r("li"),Js=o("Prefix the input with a prompt so T5 knows this is a translation task. Some models capable of multiple NLP tasks require prompting for specific tasks."),Ks=f(),nt=r("li"),Rs=o("Tokenize the input (English) and target (French) separately. You can\u2019t tokenize French text with a tokenizer pretrained on an English vocabulary. 
A context manager will help set the tokenizer to French first before tokenizing it."),Xs=f(),ce=r("li"),Gs=o("Truncate sequences to be no longer than the maximum length set by the "),rt=r("code"),Qs=o("max_length"),Vs=o(" parameter."),Bt=f(),$(de.$$.fragment),Wt=f(),F=r("p"),ea=o("Use \u{1F917} Datasets "),_e=r("a"),lt=r("code"),ta=o("map"),sa=o(" function to apply the preprocessing function over the entire dataset. You can speed up the "),it=r("code"),aa=o("map"),oa=o(" function by setting "),pt=r("code"),na=o("batched=True"),ra=o(" to process multiple elements of the dataset at once:"),Yt=f(),$(ge.$$.fragment),Ht=f(),x=r("p"),la=o("Use "),Ue=r("a"),ia=o("DataCollatorForSeq2Seq"),pa=o(" to create a batch of examples. It will also "),ft=r("em"),fa=o("dynamically pad"),ha=o(" your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),ht=r("code"),ma=o("tokenizer"),ua=o(" function by setting "),mt=r("code"),ca=o("padding=True"),da=o(", dynamic padding is more efficient."),Zt=f(),$($e.$$.fragment),Jt=f(),B=r("h2"),K=r("a"),ut=r("span"),$(ke.$$.fragment),_a=f(),ct=r("span"),ga=o("Fine-tune with Trainer"),Kt=f(),R=r("p"),$a=o("Load T5 with "),Be=r("a"),ka=o("AutoModelForSeq2SeqLM"),wa=o(":"),Rt=f(),$(we.$$.fragment),Xt=f(),$(X.$$.fragment),Gt=f(),We=r("p"),va=o("At this point, only three steps remain:"),Qt=f(),O=r("ol"),ve=r("li"),ba=o("Define your training hyperparameters in "),Ye=r("a"),qa=o("Seq2SeqTrainingArguments"),ja=o("."),ya=f(),be=r("li"),Ea=o("Pass the training arguments to "),He=r("a"),Sa=o("Seq2SeqTrainer"),Ta=o(" along with the model, dataset, tokenizer, and data collator."),xa=f(),qe=r("li"),za=o("Call "),Ze=r("a"),Aa=o("train()"),Fa=o(" to fine-tune your model."),Vt=f(),$(je.$$.fragment),es=f(),W=r("h2"),G=r("a"),dt=r("span"),$(ye.$$.fragment),Pa=f(),_t=r("span"),Ca=o("Fine-tune with TensorFlow"),ts=f(),Je=r("p"),Da=o("To fine-tune a model in TensorFlow is just as 
easy, with only a few differences."),ss=f(),$(Q.$$.fragment),as=f(),P=r("p"),La=o("Convert your datasets to the "),gt=r("code"),Ma=o("tf.data.Dataset"),Oa=o(" format with "),Ee=r("a"),$t=r("code"),Ia=o("to_tf_dataset"),Na=o(". Specify inputs and labels in "),kt=r("code"),Ua=o("columns"),Ba=o(", whether to shuffle the dataset order, batch size, and the data collator:"),os=f(),$(Se.$$.fragment),ns=f(),Ke=r("p"),Wa=o("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),rs=f(),$(Te.$$.fragment),ls=f(),V=r("p"),Ya=o("Load T5 with "),Re=r("a"),Ha=o("TFAutoModelForSeq2SeqLM"),Za=o(":"),is=f(),$(xe.$$.fragment),ps=f(),ee=r("p"),Ja=o("Configure the model for training with "),ze=r("a"),wt=r("code"),Ka=o("compile"),Ra=o(":"),fs=f(),$(Ae.$$.fragment),hs=f(),te=r("p"),Xa=o("Call "),Fe=r("a"),vt=r("code"),Ga=o("fit"),Qa=o(" to fine-tune the model:"),ms=f(),$(Pe.$$.fragment),us=f(),$(se.$$.fragment),this.h()},l(e){const a=Qo('[data-svelte="svelte-1phssyn"]',document.head);m=l(a,"META",{name:!0,content:!0}),a.forEach(t),j=h(e),c=l(e,"H1",{class:!0});var Ce=i(c);_=l(Ce,"A",{id:!0,class:!0,href:!0});var bt=i(_);E=l(bt,"SPAN",{});var qt=i(E);k(d.$$.fragment,qt),qt.forEach(t),bt.forEach(t),g=h(Ce),S=l(Ce,"SPAN",{});var jt=i(S);T=n(jt,"Translation"),jt.forEach(t),Ce.forEach(t),y=h(e),k(z.$$.fragment,e),D=h(e),De=l(e,"P",{});var Va=i(De);xs=n(Va,"Translation converts a sequence of text from one language to another. 
It is one of several tasks you can formulate as a sequence-to-sequence problem, a powerful framework that extends to vision and audio tasks."),Va.forEach(t),Et=h(e),L=l(e,"P",{});var Xe=i(L);zs=n(Xe,"This guide will show you how to fine-tune "),ne=l(Xe,"A",{href:!0,rel:!0});var eo=i(ne);As=n(eo,"T5"),eo.forEach(t),Fs=n(Xe," on the English-French subset of the "),re=l(Xe,"A",{href:!0,rel:!0});var to=i(re);Ps=n(to,"OPUS Books"),to.forEach(t),Cs=n(Xe," dataset to translate English text to French."),Xe.forEach(t),St=h(e),k(Y.$$.fragment,e),Tt=h(e),N=l(e,"H2",{class:!0});var ds=i(N);H=l(ds,"A",{id:!0,class:!0,href:!0});var so=i(H);Ve=l(so,"SPAN",{});var ao=i(Ve);k(le.$$.fragment,ao),ao.forEach(t),so.forEach(t),Ds=h(ds),et=l(ds,"SPAN",{});var oo=i(et);Ls=n(oo,"Load OPUS Books dataset"),oo.forEach(t),ds.forEach(t),xt=h(e),Le=l(e,"P",{});var no=i(Le);Ms=n(no,"Load the OPUS Books dataset from the \u{1F917} Datasets library:"),no.forEach(t),zt=h(e),k(ie.$$.fragment,e),At=h(e),Me=l(e,"P",{});var ro=i(Me);Os=n(ro,"Split this dataset into a train and test set:"),ro.forEach(t),Ft=h(e),k(pe.$$.fragment,e),Pt=h(e),Oe=l(e,"P",{});var lo=i(Oe);Is=n(lo,"Then take a look at an example:"),lo.forEach(t),Ct=h(e),k(fe.$$.fragment,e),Dt=h(e),Z=l(e,"P",{});var _s=i(Z);Ns=n(_s,"The "),tt=l(_s,"CODE",{});var io=i(tt);Us=n(io,"translation"),io.forEach(t),Bs=n(_s," field is a dictionary containing the English and French translations of the text."),_s.forEach(t),Lt=h(e),U=l(e,"H2",{class:!0});var gs=i(U);J=l(gs,"A",{id:!0,class:!0,href:!0});var po=i(J);st=l(po,"SPAN",{});var fo=i(st);k(he.$$.fragment,fo),fo.forEach(t),po.forEach(t),Ws=h(gs),at=l(gs,"SPAN",{});var ho=i(at);Ys=n(ho,"Preprocess"),ho.forEach(t),gs.forEach(t),Mt=h(e),k(me.$$.fragment,e),Ot=h(e),Ie=l(e,"P",{});var mo=i(Ie);Hs=n(mo,"Load the T5 tokenizer to process the language pairs:"),mo.forEach(t),It=h(e),k(ue.$$.fragment,e),Nt=h(e),Ne=l(e,"P",{});var uo=i(Ne);Zs=n(uo,"The preprocessing function needs 
to:"),uo.forEach(t),Ut=h(e),M=l(e,"OL",{});var Ge=i(M);ot=l(Ge,"LI",{});var co=i(ot);Js=n(co,"Prefix the input with a prompt so T5 knows this is a translation task. Some models capable of multiple NLP tasks require prompting for specific tasks."),co.forEach(t),Ks=h(Ge),nt=l(Ge,"LI",{});var _o=i(nt);Rs=n(_o,"Tokenize the input (English) and target (French) separately. You can\u2019t tokenize French text with a tokenizer pretrained on an English vocabulary. A context manager will help set the tokenizer to French first before tokenizing it."),_o.forEach(t),Xs=h(Ge),ce=l(Ge,"LI",{});var $s=i(ce);Gs=n($s,"Truncate sequences to be no longer than the maximum length set by the "),rt=l($s,"CODE",{});var go=i(rt);Qs=n(go,"max_length"),go.forEach(t),Vs=n($s," parameter."),$s.forEach(t),Ge.forEach(t),Bt=h(e),k(de.$$.fragment,e),Wt=h(e),F=l(e,"P",{});var ae=i(F);ea=n(ae,"Use \u{1F917} Datasets "),_e=l(ae,"A",{href:!0,rel:!0});var $o=i(_e);lt=l($o,"CODE",{});var ko=i(lt);ta=n(ko,"map"),ko.forEach(t),$o.forEach(t),sa=n(ae," function to apply the preprocessing function over the entire dataset. You can speed up the "),it=l(ae,"CODE",{});var wo=i(it);aa=n(wo,"map"),wo.forEach(t),oa=n(ae," function by setting "),pt=l(ae,"CODE",{});var vo=i(pt);na=n(vo,"batched=True"),vo.forEach(t),ra=n(ae," to process multiple elements of the dataset at once:"),ae.forEach(t),Yt=h(e),k(ge.$$.fragment,e),Ht=h(e),x=l(e,"P",{});var I=i(x);la=n(I,"Use "),Ue=l(I,"A",{href:!0});var bo=i(Ue);ia=n(bo,"DataCollatorForSeq2Seq"),bo.forEach(t),pa=n(I," to create a batch of examples. It will also "),ft=l(I,"EM",{});var qo=i(ft);fa=n(qo,"dynamically pad"),qo.forEach(t),ha=n(I," your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the "),ht=l(I,"CODE",{});var jo=i(ht);ma=n(jo,"tokenizer"),jo.forEach(t),ua=n(I," function by setting "),mt=l(I,"CODE",{});var yo=i(mt);ca=n(yo,"padding=True"),yo.forEach(t),da=n(I,", dynamic padding is more efficient."),I.forEach(t),Zt=h(e),k($e.$$.fragment,e),Jt=h(e),B=l(e,"H2",{class:!0});var ks=i(B);K=l(ks,"A",{id:!0,class:!0,href:!0});var Eo=i(K);ut=l(Eo,"SPAN",{});var So=i(ut);k(ke.$$.fragment,So),So.forEach(t),Eo.forEach(t),_a=h(ks),ct=l(ks,"SPAN",{});var To=i(ct);ga=n(To,"Fine-tune with Trainer"),To.forEach(t),ks.forEach(t),Kt=h(e),R=l(e,"P",{});var ws=i(R);$a=n(ws,"Load T5 with "),Be=l(ws,"A",{href:!0});var xo=i(Be);ka=n(xo,"AutoModelForSeq2SeqLM"),xo.forEach(t),wa=n(ws,":"),ws.forEach(t),Rt=h(e),k(we.$$.fragment,e),Xt=h(e),k(X.$$.fragment,e),Gt=h(e),We=l(e,"P",{});var zo=i(We);va=n(zo,"At this point, only three steps remain:"),zo.forEach(t),Qt=h(e),O=l(e,"OL",{});var Qe=i(O);ve=l(Qe,"LI",{});var vs=i(ve);ba=n(vs,"Define your training hyperparameters in "),Ye=l(vs,"A",{href:!0});var Ao=i(Ye);qa=n(Ao,"Seq2SeqTrainingArguments"),Ao.forEach(t),ja=n(vs,"."),vs.forEach(t),ya=h(Qe),be=l(Qe,"LI",{});var bs=i(be);Ea=n(bs,"Pass the training arguments to "),He=l(bs,"A",{href:!0});var Fo=i(He);Sa=n(Fo,"Seq2SeqTrainer"),Fo.forEach(t),Ta=n(bs," along with the model, dataset, tokenizer, and data collator."),bs.forEach(t),xa=h(Qe),qe=l(Qe,"LI",{});var qs=i(qe);za=n(qs,"Call "),Ze=l(qs,"A",{href:!0});var Po=i(Ze);Aa=n(Po,"train()"),Po.forEach(t),Fa=n(qs," to fine-tune your model."),qs.forEach(t),Qe.forEach(t),Vt=h(e),k(je.$$.fragment,e),es=h(e),W=l(e,"H2",{class:!0});var js=i(W);G=l(js,"A",{id:!0,class:!0,href:!0});var Co=i(G);dt=l(Co,"SPAN",{});var Do=i(dt);k(ye.$$.fragment,Do),Do.forEach(t),Co.forEach(t),Pa=h(js),_t=l(js,"SPAN",{});var Lo=i(_t);Ca=n(Lo,"Fine-tune with TensorFlow"),Lo.forEach(t),js.forEach(t),ts=h(e),Je=l(e,"P",{});var Mo=i(Je);Da=n(Mo,"To fine-tune a model in TensorFlow is just as easy, with only a few 
differences."),Mo.forEach(t),ss=h(e),k(Q.$$.fragment,e),as=h(e),P=l(e,"P",{});var oe=i(P);La=n(oe,"Convert your datasets to the "),gt=l(oe,"CODE",{});var Oo=i(gt);Ma=n(Oo,"tf.data.Dataset"),Oo.forEach(t),Oa=n(oe," format with "),Ee=l(oe,"A",{href:!0,rel:!0});var Io=i(Ee);$t=l(Io,"CODE",{});var No=i($t);Ia=n(No,"to_tf_dataset"),No.forEach(t),Io.forEach(t),Na=n(oe,". Specify inputs and labels in "),kt=l(oe,"CODE",{});var Uo=i(kt);Ua=n(Uo,"columns"),Uo.forEach(t),Ba=n(oe,", whether to shuffle the dataset order, batch size, and the data collator:"),oe.forEach(t),os=h(e),k(Se.$$.fragment,e),ns=h(e),Ke=l(e,"P",{});var Bo=i(Ke);Wa=n(Bo,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),Bo.forEach(t),rs=h(e),k(Te.$$.fragment,e),ls=h(e),V=l(e,"P",{});var ys=i(V);Ya=n(ys,"Load T5 with "),Re=l(ys,"A",{href:!0});var Wo=i(Re);Ha=n(Wo,"TFAutoModelForSeq2SeqLM"),Wo.forEach(t),Za=n(ys,":"),ys.forEach(t),is=h(e),k(xe.$$.fragment,e),ps=h(e),ee=l(e,"P",{});var Es=i(ee);Ja=n(Es,"Configure the model for training with "),ze=l(Es,"A",{href:!0,rel:!0});var Yo=i(ze);wt=l(Yo,"CODE",{});var Ho=i(wt);Ka=n(Ho,"compile"),Ho.forEach(t),Yo.forEach(t),Ra=n(Es,":"),Es.forEach(t),fs=h(e),k(Ae.$$.fragment,e),hs=h(e),te=l(e,"P",{});var Ss=i(te);Xa=n(Ss,"Call "),Fe=l(Ss,"A",{href:!0,rel:!0});var Zo=i(Fe);vt=l(Zo,"CODE",{});var Jo=i(vt);Ga=n(Jo,"fit"),Jo.forEach(t),Zo.forEach(t),Qa=n(Ss," to fine-tune the model:"),Ss.forEach(t),ms=h(e),k(Pe.$$.fragment,e),us=h(e),k(se.$$.fragment,e),this.h()},h(){u(m,"name","hf:doc:metadata"),u(m,"content",JSON.stringify(nn)),u(_,"id","translation"),u(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(_,"href","#translation"),u(c,"class","relative 
group"),u(ne,"href","https://huggingface.co/t5-small"),u(ne,"rel","nofollow"),u(re,"href","https://huggingface.co/datasets/opus_books"),u(re,"rel","nofollow"),u(H,"id","load-opus-books-dataset"),u(H,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(H,"href","#load-opus-books-dataset"),u(N,"class","relative group"),u(J,"id","preprocess"),u(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(J,"href","#preprocess"),u(U,"class","relative group"),u(_e,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map"),u(_e,"rel","nofollow"),u(Ue,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorForSeq2Seq"),u(K,"id","finetune-with-trainer"),u(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(K,"href","#finetune-with-trainer"),u(B,"class","relative group"),u(Be,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSeq2SeqLM"),u(Ye,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Seq2SeqTrainingArguments"),u(He,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Seq2SeqTrainer"),u(Ze,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train"),u(G,"id","finetune-with-tensorflow"),u(G,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),u(G,"href","#finetune-with-tensorflow"),u(W,"class","relative 
group"),u(Ee,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),u(Ee,"rel","nofollow"),u(Re,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForSeq2SeqLM"),u(ze,"href","https://keras.io/api/models/model_training_apis/#compile-method"),u(ze,"rel","nofollow"),u(Fe,"href","https://keras.io/api/models/model_training_apis/#fit-method"),u(Fe,"rel","nofollow")},m(e,a){s(document.head,m),p(e,j,a),p(e,c,a),s(c,_),s(_,E),w(d,E,null),s(c,g),s(c,S),s(S,T),p(e,y,a),w(z,e,a),p(e,D,a),p(e,De,a),s(De,xs),p(e,Et,a),p(e,L,a),s(L,zs),s(L,ne),s(ne,As),s(L,Fs),s(L,re),s(re,Ps),s(L,Cs),p(e,St,a),w(Y,e,a),p(e,Tt,a),p(e,N,a),s(N,H),s(H,Ve),w(le,Ve,null),s(N,Ds),s(N,et),s(et,Ls),p(e,xt,a),p(e,Le,a),s(Le,Ms),p(e,zt,a),w(ie,e,a),p(e,At,a),p(e,Me,a),s(Me,Os),p(e,Ft,a),w(pe,e,a),p(e,Pt,a),p(e,Oe,a),s(Oe,Is),p(e,Ct,a),w(fe,e,a),p(e,Dt,a),p(e,Z,a),s(Z,Ns),s(Z,tt),s(tt,Us),s(Z,Bs),p(e,Lt,a),p(e,U,a),s(U,J),s(J,st),w(he,st,null),s(U,Ws),s(U,at),s(at,Ys),p(e,Mt,a),w(me,e,a),p(e,Ot,a),p(e,Ie,a),s(Ie,Hs),p(e,It,a),w(ue,e,a),p(e,Nt,a),p(e,Ne,a),s(Ne,Zs),p(e,Ut,a),p(e,M,a),s(M,ot),s(ot,Js),s(M,Ks),s(M,nt),s(nt,Rs),s(M,Xs),s(M,ce),s(ce,Gs),s(ce,rt),s(rt,Qs),s(ce,Vs),p(e,Bt,a),w(de,e,a),p(e,Wt,a),p(e,F,a),s(F,ea),s(F,_e),s(_e,lt),s(lt,ta),s(F,sa),s(F,it),s(it,aa),s(F,oa),s(F,pt),s(pt,na),s(F,ra),p(e,Yt,a),w(ge,e,a),p(e,Ht,a),p(e,x,a),s(x,la),s(x,Ue),s(Ue,ia),s(x,pa),s(x,ft),s(ft,fa),s(x,ha),s(x,ht),s(ht,ma),s(x,ua),s(x,mt),s(mt,ca),s(x,da),p(e,Zt,a),w($e,e,a),p(e,Jt,a),p(e,B,a),s(B,K),s(K,ut),w(ke,ut,null),s(B,_a),s(B,ct),s(ct,ga),p(e,Kt,a),p(e,R,a),s(R,$a),s(R,Be),s(Be,ka),s(R,wa),p(e,Rt,a),w(we,e,a),p(e,Xt,a),w(X,e,a),p(e,Gt,a),p(e,We,a),s(We,va),p(e,Qt,a),p(e,O,a),s(O,ve),s(ve,ba),s(ve,Ye),s(Ye,qa),s(ve,ja),s(O,ya),s(O,be),s(be,Ea),s(be,He),s(He,Sa),s(be,Ta),s(O,xa),s(O,qe),s(qe,za),s(qe,Ze),s(Ze,Aa),s(qe,Fa),p(e,Vt,a),w(je,e,a),p(e,es,a),p(e,W,a),s(W,G),s(G,dt),w(ye,dt,null),s(W,Pa),s(W,_t),s(_t,Ca),p(e,ts,a),p
(e,Je,a),s(Je,Da),p(e,ss,a),w(Q,e,a),p(e,as,a),p(e,P,a),s(P,La),s(P,gt),s(gt,Ma),s(P,Oa),s(P,Ee),s(Ee,$t),s($t,Ia),s(P,Na),s(P,kt),s(kt,Ua),s(P,Ba),p(e,os,a),w(Se,e,a),p(e,ns,a),p(e,Ke,a),s(Ke,Wa),p(e,rs,a),w(Te,e,a),p(e,ls,a),p(e,V,a),s(V,Ya),s(V,Re),s(Re,Ha),s(V,Za),p(e,is,a),w(xe,e,a),p(e,ps,a),p(e,ee,a),s(ee,Ja),s(ee,ze),s(ze,wt),s(wt,Ka),s(ee,Ra),p(e,fs,a),w(Ae,e,a),p(e,hs,a),p(e,te,a),s(te,Xa),s(te,Fe),s(Fe,vt),s(vt,Ga),s(te,Qa),p(e,ms,a),w(Pe,e,a),p(e,us,a),w(se,e,a),cs=!0},p(e,[a]){const Ce={};a&2&&(Ce.$$scope={dirty:a,ctx:e}),Y.$set(Ce);const bt={};a&2&&(bt.$$scope={dirty:a,ctx:e}),X.$set(bt);const qt={};a&2&&(qt.$$scope={dirty:a,ctx:e}),Q.$set(qt);const jt={};a&2&&(jt.$$scope={dirty:a,ctx:e}),se.$set(jt)},i(e){cs||(v(d.$$.fragment,e),v(z.$$.fragment,e),v(Y.$$.fragment,e),v(le.$$.fragment,e),v(ie.$$.fragment,e),v(pe.$$.fragment,e),v(fe.$$.fragment,e),v(he.$$.fragment,e),v(me.$$.fragment,e),v(ue.$$.fragment,e),v(de.$$.fragment,e),v(ge.$$.fragment,e),v($e.$$.fragment,e),v(ke.$$.fragment,e),v(we.$$.fragment,e),v(X.$$.fragment,e),v(je.$$.fragment,e),v(ye.$$.fragment,e),v(Q.$$.fragment,e),v(Se.$$.fragment,e),v(Te.$$.fragment,e),v(xe.$$.fragment,e),v(Ae.$$.fragment,e),v(Pe.$$.fragment,e),v(se.$$.fragment,e),cs=!0)},o(e){b(d.$$.fragment,e),b(z.$$.fragment,e),b(Y.$$.fragment,e),b(le.$$.fragment,e),b(ie.$$.fragment,e),b(pe.$$.fragment,e),b(fe.$$.fragment,e),b(he.$$.fragment,e),b(me.$$.fragment,e),b(ue.$$.fragment,e),b(de.$$.fragment,e),b(ge.$$.fragment,e),b($e.$$.fragment,e),b(ke.$$.fragment,e),b(we.$$.fragment,e),b(X.$$.fragment,e),b(je.$$.fragment,e),b(ye.$$.fragment,e),b(Q.$$.fragment,e),b(Se.$$.fragment,e),b(Te.$$.fragment,e),b(xe.$$.fragment,e),b(Ae.$$.fragment,e),b(Pe.$$.fragment,e),b(se.$$.fragment,e),cs=!1},d(e){t(m),e&&t(j),e&&t(c),q(d),e&&t(y),q(z,e),e&&t(D),e&&t(De),e&&t(Et),e&&t(L),e&&t(St),q(Y,e),e&&t(Tt),e&&t(N),q(le),e&&t(xt),e&&t(Le),e&&t(zt),q(ie,e),e&&t(At),e&&t(Me),e&&t(Ft),q(pe,e),e&&t(Pt),e&&t(Oe),e&&t(Ct),q(fe,e),e&&t(Dt),e&&t(Z),e&&t(Lt),e&&t(
U),q(he),e&&t(Mt),q(me,e),e&&t(Ot),e&&t(Ie),e&&t(It),q(ue,e),e&&t(Nt),e&&t(Ne),e&&t(Ut),e&&t(M),e&&t(Bt),q(de,e),e&&t(Wt),e&&t(F),e&&t(Yt),q(ge,e),e&&t(Ht),e&&t(x),e&&t(Zt),q($e,e),e&&t(Jt),e&&t(B),q(ke),e&&t(Kt),e&&t(R),e&&t(Rt),q(we,e),e&&t(Xt),q(X,e),e&&t(Gt),e&&t(We),e&&t(Qt),e&&t(O),e&&t(Vt),q(je,e),e&&t(es),e&&t(W),q(ye),e&&t(ts),e&&t(Je),e&&t(ss),q(Q,e),e&&t(as),e&&t(P),e&&t(os),q(Se,e),e&&t(ns),e&&t(Ke),e&&t(rs),q(Te,e),e&&t(ls),e&&t(V),e&&t(is),q(xe,e),e&&t(ps),e&&t(ee),e&&t(fs),q(Ae,e),e&&t(hs),e&&t(te),e&&t(ms),q(Pe,e),e&&t(us),q(se,e)}}}const nn={local:"translation",sections:[{local:"load-opus-books-dataset",title:"Load OPUS Books dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-trainer",title:"Fine-tune with Trainer"},{local:"finetune-with-tensorflow",title:"Fine-tune with TensorFlow"}],title:"Translation"};function rn(C,m,j){let{fw:c}=m;return C.$$set=_=>{"fw"in _&&j(0,c=_.fw)},[c]}class dn extends Ro{constructor(m){super();Xo(this,m,rn,on,Go,{fw:0})}}export{dn as default,nn as metadata};
416
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/tasks/summarization.mdx-34e69920.js
import{S as $n,i as wn,s as vn,e as r,k as c,w as b,t as o,M as yn,c as i,d as a,m as h,a as l,x as $,h as n,b as m,F as t,g as p,y as w,q as v,o as y,B as q}from"../../chunks/vendor-4833417e.js";import{T as Ma}from"../../chunks/Tip-fffd6df1.js";import{Y as qn}from"../../chunks/Youtube-27813aed.js";import{I as Ft}from"../../chunks/IconCopyLink-4b81c553.js";import{C as A}from"../../chunks/CodeBlock-6a3d1b46.js";import{C as kn}from"../../chunks/CodeBlockFw-27a176a0.js";import"../../chunks/CopyButton-dacfbfaf.js";function jn(P){let f,k,u,g,S;return{c(){f=r("p"),k=o("See the summarization "),u=r("a"),g=o("task page"),S=o(" for more information about its associated models, datasets, and metrics."),this.h()},l(d){f=i(d,"P",{});var _=l(f);k=n(_,"See the summarization "),u=i(_,"A",{href:!0,rel:!0});var E=l(u);g=n(E,"task page"),E.forEach(a),S=n(_," for more information about its associated models, datasets, and metrics."),_.forEach(a),this.h()},h(){m(u,"href","https://huggingface.co/tasks/summarization"),m(u,"rel","nofollow")},m(d,_){p(d,f,_),t(f,k),t(f,u),t(u,g),t(f,S)},d(d){d&&a(f)}}}function Sn(P){let f,k,u,g,S,d,_,E;return{c(){f=r("p"),k=o("If you aren\u2019t familiar with fine-tuning a model with the "),u=r("a"),g=o("Trainer"),S=o(", take a look at the basic tutorial "),d=r("a"),_=o("here"),E=o("!"),this.h()},l(z){f=i(z,"P",{});var j=l(f);k=n(j,"If you aren\u2019t familiar with fine-tuning a model with the "),u=i(j,"A",{href:!0});var x=l(u);g=n(x,"Trainer"),x.forEach(a),S=n(j,", take a look at the basic tutorial "),d=i(j,"A",{href:!0});var D=l(d);_=n(D,"here"),D.forEach(a),E=n(j,"!"),j.forEach(a),this.h()},h(){m(u,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),m(d,"href","training#finetune-with-trainer")},m(z,j){p(z,f,j),t(f,k),t(f,u),t(u,g),t(f,S),t(f,d),t(d,_),t(f,E)},d(z){z&&a(f)}}}function En(P){let f,k,u,g,S;return{c(){f=r("p"),k=o("If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic 
tutorial "),u=r("a"),g=o("here"),S=o("!"),this.h()},l(d){f=i(d,"P",{});var _=l(f);k=n(_,"If you aren\u2019t familiar with fine-tuning a model with Keras, take a look at the basic tutorial "),u=i(_,"A",{href:!0});var E=l(u);g=n(E,"here"),E.forEach(a),S=n(_,"!"),_.forEach(a),this.h()},h(){m(u,"href","training#finetune-with-keras")},m(d,_){p(d,f,_),t(f,k),t(f,u),t(u,g),t(f,S)},d(d){d&&a(f)}}}function zn(P){let f,k,u,g,S,d,_,E;return{c(){f=r("p"),k=o(`For a more in-depth example of how to fine-tune a model for summarization, take a look at the corresponding `),u=r("a"),g=o("PyTorch notebook"),S=o(` or `),d=r("a"),_=o("TensorFlow notebook"),E=o("."),this.h()},l(z){f=i(z,"P",{});var j=l(f);k=n(j,`For a more in-depth example of how to fine-tune a model for summarization, take a look at the corresponding `),u=i(j,"A",{href:!0,rel:!0});var x=l(u);g=n(x,"PyTorch notebook"),x.forEach(a),S=n(j,` or `),d=i(j,"A",{href:!0,rel:!0});var D=l(d);_=n(D,"TensorFlow notebook"),D.forEach(a),E=n(j,"."),j.forEach(a),this.h()},h(){m(u,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/summarization.ipynb"),m(u,"rel","nofollow"),m(d,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/summarization-tf.ipynb"),m(d,"rel","nofollow")},m(z,j){p(z,f,j),t(f,k),t(f,u),t(u,g),t(f,S),t(f,d),t(d,_),t(f,E)},d(z){z&&a(f)}}}function Tn(P){let 
f,k,u,g,S,d,_,E,z,j,x,D,Me,Ia,Pt,G,at,Oa,Na,st,Ba,Dt,L,Ua,ie,Ha,Wa,le,Ya,Ga,Lt,K,Mt,U,X,ot,pe,Ka,nt,Xa,It,Ie,Ja,Ot,ce,Nt,Oe,Qa,Bt,he,Ut,Ne,Ra,Ht,fe,Wt,M,Va,rt,Za,es,it,ts,as,Yt,H,J,lt,me,ss,pt,os,Gt,I,ns,ct,rs,is,ht,ls,ps,Kt,ue,Xt,Be,cs,Jt,O,ft,hs,fs,de,ms,mt,us,ds,gs,ge,_s,ut,bs,$s,Qt,_e,Rt,C,ws,be,dt,vs,ys,gt,qs,ks,_t,js,Ss,Vt,$e,Zt,T,Es,Ue,zs,Ts,bt,xs,As,$t,Cs,Fs,wt,Ps,Ds,ea,we,ta,W,Q,vt,ve,Ls,yt,Ms,aa,R,Is,He,Os,Ns,sa,ye,oa,V,na,We,Bs,ra,N,qe,Us,Ye,Hs,Ws,Ys,ke,Gs,Ge,Ks,Xs,Js,je,Qs,Ke,Rs,Vs,ia,Se,la,Y,Z,qt,Ee,Zs,kt,eo,pa,Xe,to,ca,ee,ha,F,ao,jt,so,oo,ze,St,no,ro,Et,io,lo,fa,Te,ma,Je,po,ua,xe,da,te,co,Qe,ho,fo,ga,Ae,_a,ae,mo,Ce,zt,uo,go,ba,Fe,$a,se,_o,Pe,Tt,bo,$o,wa,De,va,oe,ya;return d=new Ft({}),x=new qn({props:{id:"yHnr5Dk2zCI"}}),K=new Ma({props:{$$slots:{default:[jn]},$$scope:{ctx:P}}}),pe=new Ft({}),ce=new A({props:{code:`from datasets import load_dataset billsum = load_dataset("billsum", split="ca_test")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>billsum = load_dataset(<span class="hljs-string">&quot;billsum&quot;</span>, split=<span class="hljs-string">&quot;ca_test&quot;</span>)`}}),he=new A({props:{code:"billsum = billsum.train_test_split(test_size=0.2)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>billsum = billsum.train_test_split(test_size=<span class="hljs-number">0.2</span>)'}}),fe=new A({props:{code:'billsum["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>billsum[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;summary&#x27;</span>: <span class="hljs-string">&#x27;Existing law authorizes state agencies to enter into contracts for the acquisition of goods or services upon approval by the Department of General Services. 
Existing law sets forth various requirements and prohibitions for those contracts, including, but not limited to, a prohibition on entering into contracts for the acquisition of goods or services of $100,000 or more with a contractor that discriminates between spouses and domestic partners or same-sex and different-sex couples in the provision of benefits. Existing law provides that a contract entered into in violation of those requirements and prohibitions is void and authorizes the state or any person acting on behalf of the state to bring a civil action seeking a determination that a contract is in violation and therefore void. Under existing law, a willful violation of those requirements and prohibitions is a misdemeanor.\\nThis bill would also prohibit a state agency from entering into contracts for the acquisition of goods or services of $100,000 or more with a contractor that discriminates between employees on the basis of gender identity in the provision of benefits, as specified. By expanding the scope of a crime, this bill would impose a state-mandated local program.\\nThe California Constitution requires the state to reimburse local agencies and school districts for certain costs mandated by the state. 
Statutory provisions establish procedures for making that reimbursement.\\nThis bill would provide that no reimbursement is required by this act for a specified reason.&#x27;</span>, <span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;The people of the State of California do enact as follows:\\n\\n\\nSECTION 1.\\nSection 10295.35 is added to the Public Contract Code, to read:\\n10295.35.\\n(a) (1) Notwithstanding any other law, a state agency shall not enter into any contract for the acquisition of goods or services in the amount of one hundred thousand dollars ($100,000) or more with a contractor that, in the provision of benefits, discriminates between employees on the basis of an employee\u2019s or dependent\u2019s actual or perceived gender identity, including, but not limited to, the employee\u2019s or dependent\u2019s identification as transgender.\\n(2) For purposes of this section, \u201Ccontract\u201D includes contracts with a cumulative amount of one hundred thousand dollars ($100,000) or more per contractor in each fiscal year.\\n(3) For purposes of this section, an employee health plan is discriminatory if the plan is not consistent with Section 1365.5 of the Health and Safety Code and Section 10140 of the Insurance Code.\\n(4) The requirements of this section shall apply only to those portions of a contractor\u2019s operations that occur under any of the following conditions:\\n(A) Within the state.\\n(B) On real property outside the state if the property is owned by the state or if the state has a right to occupy the property, and if the contractor\u2019s presence at that location is connected to a contract with the state.\\n(C) Elsewhere in the United States where work related to a state contract is being performed.\\n(b) Contractors shall treat as confidential, to the maximum extent allowed by law or by the requirement of the contractor\u2019s insurance provider, any request by an employee or applicant for employment 
benefits or any documentation of eligibility for benefits submitted by an employee or applicant for employment.\\n(c) After taking all reasonable measures to find a contractor that complies with this section, as determined by the state agency, the requirements of this section may be waived under any of the following circumstances:\\n(1) There is only one prospective contractor willing to enter into a specific contract with the state agency.\\n(2) The contract is necessary to respond to an emergency, as determined by the state agency, that endangers the public health, welfare, or safety, or the contract is necessary for the provision of essential services, and no entity that complies with the requirements of this section capable of responding to the emergency is immediately available.\\n(3) The requirements of this section violate, or are inconsistent with, the terms or conditions of a grant, subvention, or agreement, if the agency has made a good faith attempt to change the terms or conditions of any grant, subvention, or agreement to authorize application of this section.\\n(4) The contractor is providing wholesale or bulk water, power, or natural gas, the conveyance or transmission of the same, or ancillary services, as required for ensuring reliable services in accordance with good utility practice, if the purchase of the same cannot practically be accomplished through the standard competitive bidding procedures and the contractor is not providing direct retail services to end users.\\n(d) (1) A contractor shall not be deemed to discriminate in the provision of benefits if the contractor, in providing the benefits, pays the actual costs incurred in obtaining the benefit.\\n(2) If a contractor is unable to provide a certain benefit, despite taking reasonable measures to do so, the contractor shall not be deemed to discriminate in the provision of benefits.\\n(e) (1) Every contract subject to this chapter shall contain a statement by which the contractor certifies 
that the contractor is in compliance with this section.\\n(2) The department or other contracting agency shall enforce this section pursuant to its existing enforcement powers.\\n(3) (A) If a contractor falsely certifies that it is in compliance with this section, the contract with that contractor shall be subject to Article 9 (commencing with Section 10420), unless, within a time period specified by the department or other contracting agency, the contractor provides to the department or agency proof that it has complied, or is in the process of complying, with this section.\\n(B) The application of the remedies or penalties contained in Article 9 (commencing with Section 10420) to a contract subject to this chapter shall not preclude the application of any existing remedies otherwise available to the department or other contracting agency under its existing enforcement powers.\\n(f) Nothing in this section is intended to regulate the contracting practices of any local jurisdiction.\\n(g) This section shall be construed so as not to conflict with applicable federal laws, rules, or regulations. In the event that a court or agency of competent jurisdiction holds that federal law, rule, or regulation invalidates any clause, sentence, paragraph, or section of this code or the application thereof to any person or circumstances, it is the intent of the state that the court or agency sever that clause, sentence, paragraph, or section so that the remainder of this section shall remain in effect.\\nSEC. 2.\\nSection 10295.35 of the Public Contract Code shall not be construed to create any new enforcement authority or responsibility in the Department of General Services or any other contracting agency.\\nSEC. 
3.\\nNo reimbursement is required by this act pursuant to Section 6 of Article XIII\\u2009B of the California Constitution because the only costs that may be incurred by a local agency or school district will be incurred because this act creates a new crime or infraction, eliminates a crime or infraction, or changes the penalty for a crime or infraction, within the meaning of Section 17556 of the Government Code, or changes the definition of a crime within the meaning of Section 6 of Article XIII\\u2009B of the California Constitution.&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;An act to add Section 10295.35 to the Public Contract Code, relating to public contracts.&#x27;</span>}`}}),me=new Ft({}),ue=new A({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),_e=new A({props:{code:`prefix = "summarize: " def preprocess_function(examples): inputs = [prefix + doc for doc in examples["text"]] model_inputs = tokenizer(inputs, max_length=1024, truncation=True) with tokenizer.as_target_tokenizer(): labels = tokenizer(examples["summary"], max_length=128, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>prefix = <span class="hljs-string">&quot;summarize: &quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... 
</span> inputs = [prefix + doc <span class="hljs-keyword">for</span> doc <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;text&quot;</span>]] <span class="hljs-meta">... </span> model_inputs = tokenizer(inputs, max_length=<span class="hljs-number">1024</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> labels = tokenizer(examples[<span class="hljs-string">&quot;summary&quot;</span>], max_length=<span class="hljs-number">128</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> model_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> model_inputs`}}),$e=new A({props:{code:"tokenized_billsum = billsum.map(preprocess_function, batched=True)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_billsum = billsum.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)'}}),we=new kn({props:{group1:{id:"pt",code:`from transformers import DataCollatorForSeq2Seq data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)`},group2:{id:"tf",code:`from transformers import DataCollatorForSeq2Seq data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> 
DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}}),ve=new Ft({}),ye=new A({props:{code:`from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),V=new Ma({props:{$$slots:{default:[Sn]},$$scope:{ctx:P}}}),Se=new A({props:{code:`training_args = Seq2SeqTrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, weight_decay=0.01, save_total_limit=3, num_train_epochs=1, fp16=True, ) trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=tokenized_billsum["train"], eval_dataset=tokenized_billsum["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = Seq2SeqTrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... 
</span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Seq2SeqTrainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_billsum[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_billsum[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),Ee=new Ft({}),ee=new Ma({props:{$$slots:{default:[En]},$$scope:{ctx:P}}}),Te=new A({props:{code:`tf_train_set = tokenized_billsum["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_test_set = tokenized_billsum["test"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_billsum[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... 
</span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = tokenized_billsum[<span class="hljs-string">&quot;test&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)`}}),xe=new A({props:{code:`from transformers import create_optimizer, AdamWeightDecay optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)`}}),Ae=new A({props:{code:`from transformers import TFAutoModelForSeq2SeqLM model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-small")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)`}}),Fe=new A({props:{code:"model.compile(optimizer=optimizer)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)'}}),De=new A({props:{code:"model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3)",highlighted:'<span 
class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)'}}),oe=new Ma({props:{$$slots:{default:[zn]},$$scope:{ctx:P}}}),{c(){f=r("meta"),k=c(),u=r("h1"),g=r("a"),S=r("span"),b(d.$$.fragment),_=c(),E=r("span"),z=o("Summarization"),j=c(),b(x.$$.fragment),D=c(),Me=r("p"),Ia=o("Summarization creates a shorter version of a document or an article that captures all the important information. Along with translation, it is another example of a task that can be formulated as a sequence-to-sequence task. Summarization can be:"),Pt=c(),G=r("ul"),at=r("li"),Oa=o("Extractive: extract the most relevant information from a document."),Na=c(),st=r("li"),Ba=o("Abstractive: generate new text that captures the most relevant information."),Dt=c(),L=r("p"),Ua=o("This guide will show you how to fine-tune "),ie=r("a"),Ha=o("T5"),Wa=o(" on the California state bill subset of the "),le=r("a"),Ya=o("BillSum"),Ga=o(" dataset for abstractive summarization."),Lt=c(),b(K.$$.fragment),Mt=c(),U=r("h2"),X=r("a"),ot=r("span"),b(pe.$$.fragment),Ka=c(),nt=r("span"),Xa=o("Load BillSum dataset"),It=c(),Ie=r("p"),Ja=o("Load the BillSum dataset from the \u{1F917} Datasets library:"),Ot=c(),b(ce.$$.fragment),Nt=c(),Oe=r("p"),Qa=o("Split this dataset into a train and test set:"),Bt=c(),b(he.$$.fragment),Ut=c(),Ne=r("p"),Ra=o("Then take a look at an example:"),Ht=c(),b(fe.$$.fragment),Wt=c(),M=r("p"),Va=o("The "),rt=r("code"),Za=o("text"),es=o(" field is the input and the "),it=r("code"),ts=o("summary"),as=o(" field is the target."),Yt=c(),H=r("h2"),J=r("a"),lt=r("span"),b(me.$$.fragment),ss=c(),pt=r("span"),os=o("Preprocess"),Gt=c(),I=r("p"),ns=o("Load the T5 tokenizer to process "),ct=r("code"),rs=o("text"),is=o(" and "),ht=r("code"),ls=o("summary"),ps=o(":"),Kt=c(),b(ue.$$.fragment),Xt=c(),Be=r("p"),cs=o("The preprocessing function needs to:"),Jt=c(),O=r("ol"),ft=r("li"),hs=o("Prefix the input with a prompt so T5 knows this is 
a summarization task. Some models capable of multiple NLP tasks require prompting for specific tasks."),fs=c(),de=r("li"),ms=o("Use a context manager with the "),mt=r("code"),us=o("as_target_tokenizer()"),ds=o(" function to parallelize tokenization of inputs and labels."),gs=c(),ge=r("li"),_s=o("Truncate sequences to be no longer than the maximum length set by the "),ut=r("code"),bs=o("max_length"),$s=o(" parameter."),Qt=c(),b(_e.$$.fragment),Rt=c(),C=r("p"),ws=o("Use \u{1F917} Datasets "),be=r("a"),dt=r("code"),vs=o("map"),ys=o(" function to apply the preprocessing function over the entire dataset. You can speed up the "),gt=r("code"),qs=o("map"),ks=o(" function by setting "),_t=r("code"),js=o("batched=True"),Ss=o(" to process multiple elements of the dataset at once:"),Vt=c(),b($e.$$.fragment),Zt=c(),T=r("p"),Es=o("Use "),Ue=r("a"),zs=o("DataCollatorForSeq2Seq"),Ts=o(" to create a batch of examples. It will also "),bt=r("em"),xs=o("dynamically pad"),As=o(" your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the "),$t=r("code"),Cs=o("tokenizer"),Fs=o(" function by setting "),wt=r("code"),Ps=o("padding=True"),Ds=o(", dynamic padding is more efficient."),ea=c(),b(we.$$.fragment),ta=c(),W=r("h2"),Q=r("a"),vt=r("span"),b(ve.$$.fragment),Ls=c(),yt=r("span"),Ms=o("Fine-tune with Trainer"),aa=c(),R=r("p"),Is=o("Load T5 with "),He=r("a"),Os=o("AutoModelForSeq2SeqLM"),Ns=o(":"),sa=c(),b(ye.$$.fragment),oa=c(),b(V.$$.fragment),na=c(),We=r("p"),Bs=o("At this point, only three steps remain:"),ra=c(),N=r("ol"),qe=r("li"),Us=o("Define your training hyperparameters in "),Ye=r("a"),Hs=o("Seq2SeqTrainingArguments"),Ws=o("."),Ys=c(),ke=r("li"),Gs=o("Pass the training arguments to "),Ge=r("a"),Ks=o("Seq2SeqTrainer"),Xs=o(" along with the model, dataset, tokenizer, and data collator."),Js=c(),je=r("li"),Qs=o("Call "),Ke=r("a"),Rs=o("train()"),Vs=o(" to fine-tune your model."),ia=c(),b(Se.$$.fragment),la=c(),Y=r("h2"),Z=r("a"),qt=r("span"),b(Ee.$$.fragment),Zs=c(),kt=r("span"),eo=o("Fine-tune with TensorFlow"),pa=c(),Xe=r("p"),to=o("To fine-tune a model in TensorFlow is just as easy, with only a few differences."),ca=c(),b(ee.$$.fragment),ha=c(),F=r("p"),ao=o("Convert your datasets to the "),jt=r("code"),so=o("tf.data.Dataset"),oo=o(" format with "),ze=r("a"),St=r("code"),no=o("to_tf_dataset"),ro=o(". 
Specify inputs and labels in "),Et=r("code"),io=o("columns"),lo=o(", whether to shuffle the dataset order, batch size, and the data collator:"),fa=c(),b(Te.$$.fragment),ma=c(),Je=r("p"),po=o("Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),ua=c(),b(xe.$$.fragment),da=c(),te=r("p"),co=o("Load T5 with "),Qe=r("a"),ho=o("TFAutoModelForSeq2SeqLM"),fo=o(":"),ga=c(),b(Ae.$$.fragment),_a=c(),ae=r("p"),mo=o("Configure the model for training with "),Ce=r("a"),zt=r("code"),uo=o("compile"),go=o(":"),ba=c(),b(Fe.$$.fragment),$a=c(),se=r("p"),_o=o("Call "),Pe=r("a"),Tt=r("code"),bo=o("fit"),$o=o(" to fine-tune the model:"),wa=c(),b(De.$$.fragment),va=c(),b(oe.$$.fragment),this.h()},l(e){const s=yn('[data-svelte="svelte-1phssyn"]',document.head);f=i(s,"META",{name:!0,content:!0}),s.forEach(a),k=h(e),u=i(e,"H1",{class:!0});var Le=l(u);g=i(Le,"A",{id:!0,class:!0,href:!0});var xt=l(g);S=i(xt,"SPAN",{});var At=l(S);$(d.$$.fragment,At),At.forEach(a),xt.forEach(a),_=h(Le),E=i(Le,"SPAN",{});var Ct=l(E);z=n(Ct,"Summarization"),Ct.forEach(a),Le.forEach(a),j=h(e),$(x.$$.fragment,e),D=h(e),Me=i(e,"P",{});var wo=l(Me);Ia=n(wo,"Summarization creates a shorter version of a document or an article that captures all the important information. Along with translation, it is another example of a task that can be formulated as a sequence-to-sequence task. 
Summarization can be:"),wo.forEach(a),Pt=h(e),G=i(e,"UL",{});var qa=l(G);at=i(qa,"LI",{});var vo=l(at);Oa=n(vo,"Extractive: extract the most relevant information from a document."),vo.forEach(a),Na=h(qa),st=i(qa,"LI",{});var yo=l(st);Ba=n(yo,"Abstractive: generate new text that captures the most relevant information."),yo.forEach(a),qa.forEach(a),Dt=h(e),L=i(e,"P",{});var Re=l(L);Ua=n(Re,"This guide will show you how to fine-tune "),ie=i(Re,"A",{href:!0,rel:!0});var qo=l(ie);Ha=n(qo,"T5"),qo.forEach(a),Wa=n(Re," on the California state bill subset of the "),le=i(Re,"A",{href:!0,rel:!0});var ko=l(le);Ya=n(ko,"BillSum"),ko.forEach(a),Ga=n(Re," dataset for abstractive summarization."),Re.forEach(a),Lt=h(e),$(K.$$.fragment,e),Mt=h(e),U=i(e,"H2",{class:!0});var ka=l(U);X=i(ka,"A",{id:!0,class:!0,href:!0});var jo=l(X);ot=i(jo,"SPAN",{});var So=l(ot);$(pe.$$.fragment,So),So.forEach(a),jo.forEach(a),Ka=h(ka),nt=i(ka,"SPAN",{});var Eo=l(nt);Xa=n(Eo,"Load BillSum dataset"),Eo.forEach(a),ka.forEach(a),It=h(e),Ie=i(e,"P",{});var zo=l(Ie);Ja=n(zo,"Load the BillSum dataset from the \u{1F917} Datasets library:"),zo.forEach(a),Ot=h(e),$(ce.$$.fragment,e),Nt=h(e),Oe=i(e,"P",{});var To=l(Oe);Qa=n(To,"Split this dataset into a train and test set:"),To.forEach(a),Bt=h(e),$(he.$$.fragment,e),Ut=h(e),Ne=i(e,"P",{});var xo=l(Ne);Ra=n(xo,"Then take a look at an example:"),xo.forEach(a),Ht=h(e),$(fe.$$.fragment,e),Wt=h(e),M=i(e,"P",{});var Ve=l(M);Va=n(Ve,"The "),rt=i(Ve,"CODE",{});var Ao=l(rt);Za=n(Ao,"text"),Ao.forEach(a),es=n(Ve," field is the input and the "),it=i(Ve,"CODE",{});var Co=l(it);ts=n(Co,"summary"),Co.forEach(a),as=n(Ve," field is the target."),Ve.forEach(a),Yt=h(e),H=i(e,"H2",{class:!0});var ja=l(H);J=i(ja,"A",{id:!0,class:!0,href:!0});var Fo=l(J);lt=i(Fo,"SPAN",{});var Po=l(lt);$(me.$$.fragment,Po),Po.forEach(a),Fo.forEach(a),ss=h(ja),pt=i(ja,"SPAN",{});var Do=l(pt);os=n(Do,"Preprocess"),Do.forEach(a),ja.forEach(a),Gt=h(e),I=i(e,"P",{});var Ze=l(I);ns=n(Ze,"Load the T5 
tokenizer to process "),ct=i(Ze,"CODE",{});var Lo=l(ct);rs=n(Lo,"text"),Lo.forEach(a),is=n(Ze," and "),ht=i(Ze,"CODE",{});var Mo=l(ht);ls=n(Mo,"summary"),Mo.forEach(a),ps=n(Ze,":"),Ze.forEach(a),Kt=h(e),$(ue.$$.fragment,e),Xt=h(e),Be=i(e,"P",{});var Io=l(Be);cs=n(Io,"The preprocessing function needs to:"),Io.forEach(a),Jt=h(e),O=i(e,"OL",{});var et=l(O);ft=i(et,"LI",{});var Oo=l(ft);hs=n(Oo,"Prefix the input with a prompt so T5 knows this is a summarization task. Some models capable of multiple NLP tasks require prompting for specific tasks."),Oo.forEach(a),fs=h(et),de=i(et,"LI",{});var Sa=l(de);ms=n(Sa,"Use a context manager with the "),mt=i(Sa,"CODE",{});var No=l(mt);us=n(No,"as_target_tokenizer()"),No.forEach(a),ds=n(Sa," function to parallelize tokenization of inputs and labels."),Sa.forEach(a),gs=h(et),ge=i(et,"LI",{});var Ea=l(ge);_s=n(Ea,"Truncate sequences to be no longer than the maximum length set by the "),ut=i(Ea,"CODE",{});var Bo=l(ut);bs=n(Bo,"max_length"),Bo.forEach(a),$s=n(Ea," parameter."),Ea.forEach(a),et.forEach(a),Qt=h(e),$(_e.$$.fragment,e),Rt=h(e),C=i(e,"P",{});var ne=l(C);ws=n(ne,"Use \u{1F917} Datasets "),be=i(ne,"A",{href:!0,rel:!0});var Uo=l(be);dt=i(Uo,"CODE",{});var Ho=l(dt);vs=n(Ho,"map"),Ho.forEach(a),Uo.forEach(a),ys=n(ne," function to apply the preprocessing function over the entire dataset. You can speed up the "),gt=i(ne,"CODE",{});var Wo=l(gt);qs=n(Wo,"map"),Wo.forEach(a),ks=n(ne," function by setting "),_t=i(ne,"CODE",{});var Yo=l(_t);js=n(Yo,"batched=True"),Yo.forEach(a),Ss=n(ne," to process multiple elements of the dataset at once:"),ne.forEach(a),Vt=h(e),$($e.$$.fragment,e),Zt=h(e),T=i(e,"P",{});var B=l(T);Es=n(B,"Use "),Ue=i(B,"A",{href:!0});var Go=l(Ue);zs=n(Go,"DataCollatorForSeq2Seq"),Go.forEach(a),Ts=n(B," to create a batch of examples. 
It will also "),bt=i(B,"EM",{});var Ko=l(bt);xs=n(Ko,"dynamically pad"),Ko.forEach(a),As=n(B," your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the "),$t=i(B,"CODE",{});var Xo=l($t);Cs=n(Xo,"tokenizer"),Xo.forEach(a),Fs=n(B," function by setting "),wt=i(B,"CODE",{});var Jo=l(wt);Ps=n(Jo,"padding=True"),Jo.forEach(a),Ds=n(B,", dynamic padding is more efficient."),B.forEach(a),ea=h(e),$(we.$$.fragment,e),ta=h(e),W=i(e,"H2",{class:!0});var za=l(W);Q=i(za,"A",{id:!0,class:!0,href:!0});var Qo=l(Q);vt=i(Qo,"SPAN",{});var Ro=l(vt);$(ve.$$.fragment,Ro),Ro.forEach(a),Qo.forEach(a),Ls=h(za),yt=i(za,"SPAN",{});var Vo=l(yt);Ms=n(Vo,"Fine-tune with Trainer"),Vo.forEach(a),za.forEach(a),aa=h(e),R=i(e,"P",{});var Ta=l(R);Is=n(Ta,"Load T5 with "),He=i(Ta,"A",{href:!0});var Zo=l(He);Os=n(Zo,"AutoModelForSeq2SeqLM"),Zo.forEach(a),Ns=n(Ta,":"),Ta.forEach(a),sa=h(e),$(ye.$$.fragment,e),oa=h(e),$(V.$$.fragment,e),na=h(e),We=i(e,"P",{});var en=l(We);Bs=n(en,"At this point, only three steps remain:"),en.forEach(a),ra=h(e),N=i(e,"OL",{});var tt=l(N);qe=i(tt,"LI",{});var xa=l(qe);Us=n(xa,"Define your training hyperparameters in "),Ye=i(xa,"A",{href:!0});var tn=l(Ye);Hs=n(tn,"Seq2SeqTrainingArguments"),tn.forEach(a),Ws=n(xa,"."),xa.forEach(a),Ys=h(tt),ke=i(tt,"LI",{});var Aa=l(ke);Gs=n(Aa,"Pass the training arguments to "),Ge=i(Aa,"A",{href:!0});var an=l(Ge);Ks=n(an,"Seq2SeqTrainer"),an.forEach(a),Xs=n(Aa," along with the model, dataset, tokenizer, and data collator."),Aa.forEach(a),Js=h(tt),je=i(tt,"LI",{});var Ca=l(je);Qs=n(Ca,"Call "),Ke=i(Ca,"A",{href:!0});var sn=l(Ke);Rs=n(sn,"train()"),sn.forEach(a),Vs=n(Ca," to fine-tune your model."),Ca.forEach(a),tt.forEach(a),ia=h(e),$(Se.$$.fragment,e),la=h(e),Y=i(e,"H2",{class:!0});var Fa=l(Y);Z=i(Fa,"A",{id:!0,class:!0,href:!0});var on=l(Z);qt=i(on,"SPAN",{});var nn=l(qt);$(Ee.$$.fragment,nn),nn.forEach(a),on.forEach(a),Zs=h(Fa),kt=i(Fa,"SPAN",{});var 
rn=l(kt);eo=n(rn,"Fine-tune with TensorFlow"),rn.forEach(a),Fa.forEach(a),pa=h(e),Xe=i(e,"P",{});var ln=l(Xe);to=n(ln,"To fine-tune a model in TensorFlow is just as easy, with only a few differences."),ln.forEach(a),ca=h(e),$(ee.$$.fragment,e),ha=h(e),F=i(e,"P",{});var re=l(F);ao=n(re,"Convert your datasets to the "),jt=i(re,"CODE",{});var pn=l(jt);so=n(pn,"tf.data.Dataset"),pn.forEach(a),oo=n(re," format with "),ze=i(re,"A",{href:!0,rel:!0});var cn=l(ze);St=i(cn,"CODE",{});var hn=l(St);no=n(hn,"to_tf_dataset"),hn.forEach(a),cn.forEach(a),ro=n(re,". Specify inputs and labels in "),Et=i(re,"CODE",{});var fn=l(Et);io=n(fn,"columns"),fn.forEach(a),lo=n(re,", whether to shuffle the dataset order, batch size, and the data collator:"),re.forEach(a),fa=h(e),$(Te.$$.fragment,e),ma=h(e),Je=i(e,"P",{});var mn=l(Je);po=n(mn,"Set up an optimizer function, learning rate schedule, and some training hyperparameters:"),mn.forEach(a),ua=h(e),$(xe.$$.fragment,e),da=h(e),te=i(e,"P",{});var Pa=l(te);co=n(Pa,"Load T5 with "),Qe=i(Pa,"A",{href:!0});var un=l(Qe);ho=n(un,"TFAutoModelForSeq2SeqLM"),un.forEach(a),fo=n(Pa,":"),Pa.forEach(a),ga=h(e),$(Ae.$$.fragment,e),_a=h(e),ae=i(e,"P",{});var Da=l(ae);mo=n(Da,"Configure the model for training with "),Ce=i(Da,"A",{href:!0,rel:!0});var dn=l(Ce);zt=i(dn,"CODE",{});var gn=l(zt);uo=n(gn,"compile"),gn.forEach(a),dn.forEach(a),go=n(Da,":"),Da.forEach(a),ba=h(e),$(Fe.$$.fragment,e),$a=h(e),se=i(e,"P",{});var La=l(se);_o=n(La,"Call "),Pe=i(La,"A",{href:!0,rel:!0});var _n=l(Pe);Tt=i(_n,"CODE",{});var bn=l(Tt);bo=n(bn,"fit"),bn.forEach(a),_n.forEach(a),$o=n(La," to fine-tune the model:"),La.forEach(a),wa=h(e),$(De.$$.fragment,e),va=h(e),$(oe.$$.fragment,e),this.h()},h(){m(f,"name","hf:doc:metadata"),m(f,"content",JSON.stringify(xn)),m(g,"id","summarization"),m(g,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),m(g,"href","#summarization"),m(u,"class","relative group"),m(ie,"href","https://huggingface.co/t5-small"),m(ie,"rel","nofollow"),m(le,"href","https://huggingface.co/datasets/billsum"),m(le,"rel","nofollow"),m(X,"id","load-billsum-dataset"),m(X,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(X,"href","#load-billsum-dataset"),m(U,"class","relative group"),m(J,"id","preprocess"),m(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(J,"href","#preprocess"),m(H,"class","relative group"),m(be,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map"),m(be,"rel","nofollow"),m(Ue,"href","/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorForSeq2Seq"),m(Q,"id","finetune-with-trainer"),m(Q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Q,"href","#finetune-with-trainer"),m(W,"class","relative group"),m(He,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSeq2SeqLM"),m(Ye,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Seq2SeqTrainingArguments"),m(Ge,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Seq2SeqTrainer"),m(Ke,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train"),m(Z,"id","finetune-with-tensorflow"),m(Z,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(Z,"href","#finetune-with-tensorflow"),m(Y,"class","relative 
group"),m(ze,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),m(ze,"rel","nofollow"),m(Qe,"href","/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForSeq2SeqLM"),m(Ce,"href","https://keras.io/api/models/model_training_apis/#compile-method"),m(Ce,"rel","nofollow"),m(Pe,"href","https://keras.io/api/models/model_training_apis/#fit-method"),m(Pe,"rel","nofollow")},m(e,s){t(document.head,f),p(e,k,s),p(e,u,s),t(u,g),t(g,S),w(d,S,null),t(u,_),t(u,E),t(E,z),p(e,j,s),w(x,e,s),p(e,D,s),p(e,Me,s),t(Me,Ia),p(e,Pt,s),p(e,G,s),t(G,at),t(at,Oa),t(G,Na),t(G,st),t(st,Ba),p(e,Dt,s),p(e,L,s),t(L,Ua),t(L,ie),t(ie,Ha),t(L,Wa),t(L,le),t(le,Ya),t(L,Ga),p(e,Lt,s),w(K,e,s),p(e,Mt,s),p(e,U,s),t(U,X),t(X,ot),w(pe,ot,null),t(U,Ka),t(U,nt),t(nt,Xa),p(e,It,s),p(e,Ie,s),t(Ie,Ja),p(e,Ot,s),w(ce,e,s),p(e,Nt,s),p(e,Oe,s),t(Oe,Qa),p(e,Bt,s),w(he,e,s),p(e,Ut,s),p(e,Ne,s),t(Ne,Ra),p(e,Ht,s),w(fe,e,s),p(e,Wt,s),p(e,M,s),t(M,Va),t(M,rt),t(rt,Za),t(M,es),t(M,it),t(it,ts),t(M,as),p(e,Yt,s),p(e,H,s),t(H,J),t(J,lt),w(me,lt,null),t(H,ss),t(H,pt),t(pt,os),p(e,Gt,s),p(e,I,s),t(I,ns),t(I,ct),t(ct,rs),t(I,is),t(I,ht),t(ht,ls),t(I,ps),p(e,Kt,s),w(ue,e,s),p(e,Xt,s),p(e,Be,s),t(Be,cs),p(e,Jt,s),p(e,O,s),t(O,ft),t(ft,hs),t(O,fs),t(O,de),t(de,ms),t(de,mt),t(mt,us),t(de,ds),t(O,gs),t(O,ge),t(ge,_s),t(ge,ut),t(ut,bs),t(ge,$s),p(e,Qt,s),w(_e,e,s),p(e,Rt,s),p(e,C,s),t(C,ws),t(C,be),t(be,dt),t(dt,vs),t(C,ys),t(C,gt),t(gt,qs),t(C,ks),t(C,_t),t(_t,js),t(C,Ss),p(e,Vt,s),w($e,e,s),p(e,Zt,s),p(e,T,s),t(T,Es),t(T,Ue),t(Ue,zs),t(T,Ts),t(T,bt),t(bt,xs),t(T,As),t(T,$t),t($t,Cs),t(T,Fs),t(T,wt),t(wt,Ps),t(T,Ds),p(e,ea,s),w(we,e,s),p(e,ta,s),p(e,W,s),t(W,Q),t(Q,vt),w(ve,vt,null),t(W,Ls),t(W,yt),t(yt,Ms),p(e,aa,s),p(e,R,s),t(R,Is),t(R,He),t(He,Os),t(R,Ns),p(e,sa,s),w(ye,e,s),p(e,oa,s),w(V,e,s),p(e,na,s),p(e,We,s),t(We,Bs),p(e,ra,s),p(e,N,s),t(N,qe),t(qe,Us),t(qe,Ye),t(Ye,Hs),t(qe,Ws),t(N,Ys),t(N,ke),t(ke,Gs),t(ke,Ge),t(Ge,Ks),t(ke,Xs),t(N,Js),t(N,je)
,t(je,Qs),t(je,Ke),t(Ke,Rs),t(je,Vs),p(e,ia,s),w(Se,e,s),p(e,la,s),p(e,Y,s),t(Y,Z),t(Z,qt),w(Ee,qt,null),t(Y,Zs),t(Y,kt),t(kt,eo),p(e,pa,s),p(e,Xe,s),t(Xe,to),p(e,ca,s),w(ee,e,s),p(e,ha,s),p(e,F,s),t(F,ao),t(F,jt),t(jt,so),t(F,oo),t(F,ze),t(ze,St),t(St,no),t(F,ro),t(F,Et),t(Et,io),t(F,lo),p(e,fa,s),w(Te,e,s),p(e,ma,s),p(e,Je,s),t(Je,po),p(e,ua,s),w(xe,e,s),p(e,da,s),p(e,te,s),t(te,co),t(te,Qe),t(Qe,ho),t(te,fo),p(e,ga,s),w(Ae,e,s),p(e,_a,s),p(e,ae,s),t(ae,mo),t(ae,Ce),t(Ce,zt),t(zt,uo),t(ae,go),p(e,ba,s),w(Fe,e,s),p(e,$a,s),p(e,se,s),t(se,_o),t(se,Pe),t(Pe,Tt),t(Tt,bo),t(se,$o),p(e,wa,s),w(De,e,s),p(e,va,s),w(oe,e,s),ya=!0},p(e,[s]){const Le={};s&2&&(Le.$$scope={dirty:s,ctx:e}),K.$set(Le);const xt={};s&2&&(xt.$$scope={dirty:s,ctx:e}),V.$set(xt);const At={};s&2&&(At.$$scope={dirty:s,ctx:e}),ee.$set(At);const Ct={};s&2&&(Ct.$$scope={dirty:s,ctx:e}),oe.$set(Ct)},i(e){ya||(v(d.$$.fragment,e),v(x.$$.fragment,e),v(K.$$.fragment,e),v(pe.$$.fragment,e),v(ce.$$.fragment,e),v(he.$$.fragment,e),v(fe.$$.fragment,e),v(me.$$.fragment,e),v(ue.$$.fragment,e),v(_e.$$.fragment,e),v($e.$$.fragment,e),v(we.$$.fragment,e),v(ve.$$.fragment,e),v(ye.$$.fragment,e),v(V.$$.fragment,e),v(Se.$$.fragment,e),v(Ee.$$.fragment,e),v(ee.$$.fragment,e),v(Te.$$.fragment,e),v(xe.$$.fragment,e),v(Ae.$$.fragment,e),v(Fe.$$.fragment,e),v(De.$$.fragment,e),v(oe.$$.fragment,e),ya=!0)},o(e){y(d.$$.fragment,e),y(x.$$.fragment,e),y(K.$$.fragment,e),y(pe.$$.fragment,e),y(ce.$$.fragment,e),y(he.$$.fragment,e),y(fe.$$.fragment,e),y(me.$$.fragment,e),y(ue.$$.fragment,e),y(_e.$$.fragment,e),y($e.$$.fragment,e),y(we.$$.fragment,e),y(ve.$$.fragment,e),y(ye.$$.fragment,e),y(V.$$.fragment,e),y(Se.$$.fragment,e),y(Ee.$$.fragment,e),y(ee.$$.fragment,e),y(Te.$$.fragment,e),y(xe.$$.fragment,e),y(Ae.$$.fragment,e),y(Fe.$$.fragment,e),y(De.$$.fragment,e),y(oe.$$.fragment,e),ya=!1},d(e){a(f),e&&a(k),e&&a(u),q(d),e&&a(j),q(x,e),e&&a(D),e&&a(Me),e&&a(Pt),e&&a(G),e&&a(Dt),e&&a(L),e&&a(Lt),q(K,e),e&&a(Mt),e&&a(U),q(pe),e&&a(It),e
&&a(Ie),e&&a(Ot),q(ce,e),e&&a(Nt),e&&a(Oe),e&&a(Bt),q(he,e),e&&a(Ut),e&&a(Ne),e&&a(Ht),q(fe,e),e&&a(Wt),e&&a(M),e&&a(Yt),e&&a(H),q(me),e&&a(Gt),e&&a(I),e&&a(Kt),q(ue,e),e&&a(Xt),e&&a(Be),e&&a(Jt),e&&a(O),e&&a(Qt),q(_e,e),e&&a(Rt),e&&a(C),e&&a(Vt),q($e,e),e&&a(Zt),e&&a(T),e&&a(ea),q(we,e),e&&a(ta),e&&a(W),q(ve),e&&a(aa),e&&a(R),e&&a(sa),q(ye,e),e&&a(oa),q(V,e),e&&a(na),e&&a(We),e&&a(ra),e&&a(N),e&&a(ia),q(Se,e),e&&a(la),e&&a(Y),q(Ee),e&&a(pa),e&&a(Xe),e&&a(ca),q(ee,e),e&&a(ha),e&&a(F),e&&a(fa),q(Te,e),e&&a(ma),e&&a(Je),e&&a(ua),q(xe,e),e&&a(da),e&&a(te),e&&a(ga),q(Ae,e),e&&a(_a),e&&a(ae),e&&a(ba),q(Fe,e),e&&a($a),e&&a(se),e&&a(wa),q(De,e),e&&a(va),q(oe,e)}}}const xn={local:"summarization",sections:[{local:"load-billsum-dataset",title:"Load BillSum dataset"},{local:"preprocess",title:"Preprocess"},{local:"finetune-with-trainer",title:"Fine-tune with Trainer"},{local:"finetune-with-tensorflow",title:"Fine-tune with TensorFlow"}],title:"Summarization"};function An(P,f,k){let{fw:u}=f;return P.$$set=g=>{"fw"in g&&k(0,u=g.fw)},[u]}class On extends $n{constructor(f){super();wn(this,f,An,Tn,vn,{fw:0})}}export{On as default,xn as metadata};
417
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/internal/generation_utils.mdx-8b17d2b2.js
import{S as L2,i as x2,s as E2,e as o,k as c,w as m,t as a,M as P2,c as n,d as r,m as l,a as s,x as h,h as i,b as d,F as t,g as f,y as g,L as F2,q as u,o as _,B as v}from"../../chunks/vendor-4833417e.js";import{D as b}from"../../chunks/Docstring-4f315ed9.js";import{C as m$}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as qe}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function D2(yf){let Be,qn,N,H,Gi,Zt,kf,ji,wf,Jl,y,Lf,Bn,xf,Ef,In,Pf,Ff,An,Df,zf,Cn,Sf,Of,Nn,qf,Bf,Wn,If,Af,Vn,Cf,Nf,Ql,Mn,Wf,Zl,Ge,mt,Hi,er,Vf,Ri,Mf,ed,W,Gf,Gn,jf,Hf,jn,Rf,Kf,Hn,Uf,Yf,td,Rn,Xf,rd,tr,od,Ie,Jf,Ki,Qf,Zf,Kn,em,tm,nd,V,Un,Ui,rm,om,nm,Yn,Yi,sm,am,im,Xn,Xi,cm,lm,dm,Jn,Ji,pm,fm,sd,k,mm,Qi,hm,gm,Zi,um,_m,ec,vm,bm,tc,$m,Tm,rc,ym,km,oc,wm,Lm,ad,F,xm,nc,Em,Pm,sc,Fm,Dm,ac,zm,Sm,ic,Om,qm,id,D,Bm,cc,Im,Am,lc,Cm,Nm,dc,Wm,Vm,pc,Mm,Gm,cd,rr,ld,ht,jm,fc,Hm,Rm,dd,z,Km,mc,Um,Ym,hc,Xm,Jm,gc,Qm,Zm,uc,eh,th,pd,Qn,rh,fd,je,gt,_c,or,oh,vc,nh,md,He,nr,sh,bc,ah,hd,Re,sr,ih,$c,ch,gd,R,ar,lh,Tc,dh,ph,ut,ir,fh,yc,mh,ud,Ke,_t,kc,cr,hh,wc,gh,_d,Ue,lr,uh,Lc,_h,vd,Ye,dr,vh,xc,bh,bd,K,pr,$h,Ec,Th,yh,vt,fr,kh,Pc,wh,$d,Xe,bt,Fc,mr,Lh,Dc,xh,Td,Je,hr,Eh,zc,Ph,yd,Qe,gr,Fh,Sc,Dh,kd,Ze,$t,Oc,ur,zh,qc,Sh,wd,et,_r,Oh,Bc,qh,Ld,tt,vr,Bh,Ic,Ih,xd,rt,Tt,Ac,br,Ah,Cc,Ch,Ed,yt,Nh,Zn,Wh,Vh,Pd,U,$r,Mh,Nc,Gh,jh,kt,Tr,Hh,Wc,Rh,Fd,Y,yr,Kh,w,Uh,es,Yh,Xh,ts,Jh,Qh,Vc,Zh,eg,Mc,Gc,tg,rg,rs,og,ng,os,sg,ag,ig,ns,kr,Dd,X,wr,cg,jc,lg,dg,wt,Lr,pg,Hc,fg,zd,J,xr,mg,ss,as,hg,gg,ug,is,Er,Sd,Q,Pr,_g,cs,ls,vg,bg,$g,ds,Fr,Od,Z,Dr,Tg,ps,fs,yg,kg,wg,ms,zr,qd,ee,Sr,Lg,hs,gs,xg,Eg,Pg,us,Or,Bd,te,qr,Fg,_s,vs,Dg,zg,Sg,bs,Br,Id,re,Ir,Og,Lt,$s,qg,Bg,Ar,Ig,Ag,Cg,Ts,Cr,Ad,oe,Nr,Ng,ys,ks,Wg,Vg,Mg,ws,Wr,Cd,ne,Vr,Gg,xt,Ls,jg,Hg,Mr,Rg,Kg,Ug,xs,Gr,Nd,se,jr,Yg,Ae,Es,Xg,Jg,Ps,Qg,Zg,Hr,eu,tu,ru,Fs,Rr,Wd,ae,Kr,ou,Ds,zs,nu,su,au,Ss,Ur,Vd,ie,Yr,iu,Et,Os,cu,lu,Rc,du,pu,fu,qs,Xr,Md,ce,Jr,mu,M,Bs,hu,gu,Kc,uu,_u,Uc,vu,bu,Yc,$u,Tu,yu,Is,Qr,Gd,le,Zr,ku,Xc,wu,Lu,Pt,eo,xu,Jc,Eu,jd,de,to,Pu,B,Fu,As,Du,zu,Qc,Su,Ou,Zc,el,qu,Bu,Cs,Iu,Au,Cu,Ns,r
o,Hd,pe,oo,Nu,tl,Wu,Vu,Ft,no,Mu,rl,Gu,Rd,fe,so,ju,Ws,Vs,Hu,Ru,Ku,Ms,ao,Kd,me,io,Uu,Gs,js,Yu,Xu,Ju,Hs,co,Ud,he,lo,Qu,Rs,Ks,Zu,e_,t_,Us,po,Yd,ge,fo,r_,Ys,Xs,o_,n_,s_,Js,mo,Xd,ue,ho,a_,Qs,Zs,i_,c_,l_,ea,go,Jd,_e,uo,d_,Dt,ta,p_,f_,_o,m_,h_,g_,ra,vo,Qd,ve,bo,u_,oa,na,__,v_,b_,sa,$o,Zd,be,To,$_,ol,T_,y_,zt,yo,k_,nl,w_,ep,$e,ko,L_,L,x_,aa,E_,P_,ia,F_,D_,sl,z_,S_,al,il,O_,q_,ca,B_,I_,la,A_,C_,N_,da,wo,tp,Te,Lo,W_,cl,V_,M_,St,xo,G_,ll,j_,rp,ye,Eo,H_,pa,fa,R_,K_,U_,ma,Po,op,ke,Fo,Y_,ha,ga,X_,J_,Q_,ua,Do,np,we,zo,Z_,_a,va,ev,tv,rv,ba,So,sp,Le,Oo,ov,$a,Ta,nv,sv,av,ya,qo,ap,xe,Bo,iv,Ot,ka,cv,lv,dl,dv,pv,fv,wa,Io,ip,Ee,Ao,mv,La,xa,hv,gv,uv,Ea,Co,cp,ot,qt,pl,No,_v,fl,vv,lp,Bt,bv,Pa,$v,Tv,dp,Pe,Wo,yv,ml,kv,wv,Fa,Vo,pp,nt,Mo,Lv,Da,Go,fp,Fe,jo,xv,Ho,Ev,hl,Pv,Fv,Dv,za,Ro,mp,De,Ko,zv,Uo,Sv,gl,Ov,qv,Bv,Sa,Yo,hp,st,It,ul,Xo,Iv,_l,Av,gp,At,Cv,Oa,Nv,Wv,up,$,Jo,Vv,vl,Mv,Gv,bl,jv,Hv,Qo,Rv,$l,Kv,Uv,Ct,Zo,Yv,Tl,Xv,Jv,Nt,en,Qv,yl,Zv,eb,Wt,tn,tb,kl,rb,ob,Vt,rn,nb,on,sb,wl,ab,ib,cb,Mt,nn,lb,Ll,db,pb,Gt,sn,fb,xl,mb,hb,Ce,an,gb,cn,ub,El,_b,vb,bb,Pl,$b,_p,at,ln,Tb,qa,Ba,yb,kb,vp,it,dn,wb,pn,Lb,Ia,xb,Eb,bp,I,fn,Pb,Fl,Fb,Db,E,mn,zb,Dl,Sb,Ob,ct,qb,zl,Bb,Ib,Sl,Ab,Cb,Nb,Ol,ql,Wb,Vb,ze,Mb,Bl,Gb,jb,Il,Hb,Rb,Al,Kb,Ub,Yb,Cl,Xb,Jb,jt,hn,Qb,Nl,Zb,$p,lt,Ht,Wl,gn,e1,Vl,t1,Tp,A,un,r1,dt,o1,Aa,n1,s1,Ca,a1,i1,c1,Na,_n,l1,Wa,vn,yp,x,bn,d1,Va,Ma,p1,f1,m1,$n,h1,Tn,g1,u1,_1,Ga,v1,yn,b1,$1,ja,kn,T1,Ha,wn,kp,C,Ln,y1,Ra,Ka,k1,w1,L1,Ua,xn,x1,Ya,En,wp,pt,Rt,Ml,Pn,E1,Gl,P1,Lp,Se,Fn,F1,jl,D1,z1,Xa,S1,Dn,O1,xp,Oe,zn,q1,Hl,B1,I1,Ja,A1,Sn,C1,Ep;return Zt=new qe({}),er=new qe({}),tr=new m$({props:{code:`from transformers import GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained("gpt2") model = GPT2LMHeadModel.from_pretrained("gpt2") inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt") generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> 
GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) model = GPT2LMHeadModel.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute and &quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) generation_output = model.generate(**inputs, return_dict_in_generate=<span class="hljs-literal">True</span>, output_scores=<span class="hljs-literal">True</span>)`}}),rr=new m$({props:{code:"generation_output[:2]",highlighted:'generation_output[:<span class="hljs-number">2</span>]'}}),or=new qe({}),nr=new b({props:{name:"class transformers.generation_utils.GreedySearchDecoderOnlyOutput",anchor:"transformers.generation_utils.GreedySearchDecoderOnlyOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L62",parametersDescription:[{anchor:"transformers.generation_utils.GreedySearchDecoderOnlyOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.GreedySearchDecoderOnlyOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. <code>(max_length-input_ids.shape[-1],)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size, config.vocab_size)</code>).`,name:"scores"},{anchor:"transformers.generation_utils.GreedySearchDecoderOnlyOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"attentions"},{anchor:"transformers.generation_utils.GreedySearchDecoderOnlyOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, generated_length, hidden_size)</code>.`,name:"hidden_states"}]}}),sr=new b({props:{name:"class 
transformers.generation_utils.GreedySearchEncoderDecoderOutput",anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L90",parametersDescription:[{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. 
<code>(max_length-1,)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size, config.vocab_size)</code>).`,name:"scores"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.`,name:"encoder_attentions"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.`,name:"encoder_hidden_states"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"decoder_attentions"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"cross_attentions"},{anchor:"transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, generated_length, hidden_size)</code>.`,name:"decoder_hidden_states"}]}}),ar=new b({props:{name:"class transformers.generation_flax_utils.FlaxGreedySearchOutput",anchor:"transformers.generation_flax_utils.FlaxGreedySearchOutput",parameters:[{name:"sequences",val:": ndarray = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_utils.py#L45",parametersDescription:[{anchor:"transformers.generation_flax_utils.FlaxGreedySearchOutput.sequences",description:`<strong>sequences</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, max_length)</code>) &#x2014; The generated sequences.`,name:"sequences"}]}}),ir=new b({props:{name:"replace",anchor:"None",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120"}}),cr=new qe({}),lr=new b({props:{name:"class transformers.generation_utils.SampleDecoderOnlyOutput",anchor:"transformers.generation_utils.SampleDecoderOnlyOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = 
None"},{name:"attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L132",parametersDescription:[{anchor:"transformers.generation_utils.SampleDecoderOnlyOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.SampleDecoderOnlyOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. 
<code>(max_length-input_ids.shape[-1],)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_return_sequences, config.vocab_size)</code>).`,name:"scores"},{anchor:"transformers.generation_utils.SampleDecoderOnlyOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"attentions"},{anchor:"transformers.generation_utils.SampleDecoderOnlyOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(num_return_sequences*batch_size, generated_length, hidden_size)</code>.`,name:"hidden_states"}]}}),dr=new b({props:{name:"class transformers.generation_utils.SampleEncoderDecoderOutput",anchor:"transformers.generation_utils.SampleEncoderDecoderOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] 
= None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L161",parametersDescription:[{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. 
<code>(max_length-1,)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_return_sequences, config.vocab_size)</code>).`,name:"scores"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)</code>.`,name:"encoder_attentions"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_return_sequences, sequence_length, hidden_size)</code>.`,name:"encoder_hidden_states"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)</code>.`,name:"decoder_attentions"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.cross_attentions",description:`<strong>cross_attentions</strong> 
(<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"cross_attentions"},{anchor:"transformers.generation_utils.SampleEncoderDecoderOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences, generated_length, hidden_size)</code>.`,name:"decoder_hidden_states"}]}}),pr=new b({props:{name:"class transformers.generation_flax_utils.FlaxSampleOutput",anchor:"transformers.generation_flax_utils.FlaxSampleOutput",parameters:[{name:"sequences",val:": ndarray = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_utils.py#L59",parametersDescription:[{anchor:"transformers.generation_flax_utils.FlaxSampleOutput.sequences",description:`<strong>sequences</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, max_length)</code>) &#x2014; The generated sequences.`,name:"sequences"}]}}),fr=new b({props:{name:"replace",anchor:"None",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120"}}),mr=new qe({}),hr=new b({props:{name:"class transformers.generation_utils.BeamSearchDecoderOnlyOutput",anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput",parameters:[{name:"sequences",val:": LongTensor = 
None"},{name:"sequences_scores",val:": typing.Optional[torch.FloatTensor] = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"beam_indices",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.LongTensor]]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L204",parametersDescription:[{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences_scores",description:`<strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.`,name:"sequences_scores"},{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. 
<code>(max_length-input_ids.shape[-1],)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_beams*num_return_sequences, config.vocab_size)</code>).`,name:"scores"},{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.beam_indices",description:`<strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. <code>(batch_size*num_return_sequences)</code>-shaped tuple of <code>(max_length-input_ids.shape[-1],)</code>-shaped tuples of scalar <code>torch.LongTensor</code> tensors.`,name:"beam_indices"},{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.`,name:"attentions"},{anchor:"transformers.generation_utils.BeamSearchDecoderOnlyOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)</code>.`,name:"hidden_states"}]}}),gr=new b({props:{name:"class 
transformers.generation_utils.BeamSearchEncoderDecoderOutput",anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"sequences_scores",val:": typing.Optional[torch.FloatTensor] = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"beam_indices",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.LongTensor]]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L239",parametersDescription:[{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences_scores",description:`<strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.`,name:"sequences_scores"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. <code>(max_length-1,)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_beams, config.vocab_size)</code>).`,name:"scores"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.beam_indices",description:`<strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. 
<code>(batch_size*num_return_sequences)</code>-shaped tuple of <code>(max_length-1,)</code>-shaped tuples of scalar <code>torch.LongTensor</code> tensors.`,name:"beam_indices"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.attentions",description:"<strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014;",name:"attentions"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.`,name:"encoder_attentions"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)</code>.`,name:"encoder_hidden_states"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of 
<code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, num_heads, generated_length, sequence_length)</code>.`,name:"decoder_attentions"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"cross_attentions"},{anchor:"transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)</code>.`,name:"decoder_hidden_states"}]}}),ur=new qe({}),_r=new b({props:{name:"class transformers.generation_utils.BeamSampleDecoderOnlyOutput",anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"sequences_scores",val:": typing.Optional[torch.FloatTensor] = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"beam_indices",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.LongTensor]]] = None"},{name:"attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"hidden_states",val:": 
typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L290",parametersDescription:[{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences_scores",description:`<strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_return_sequence)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.`,name:"sequences_scores"},{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. 
<code>(max_length-input_ids.shape[-1],)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_beams*num_return_sequences, config.vocab_size)</code>).`,name:"scores"},{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.beam_indices",description:`<strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. <code>(batch_size*num_return_sequences)</code>-shaped tuple of <code>(max_length-input_ids.shape[-1],)</code>-shaped tuples of scalar <code>torch.LongTensor</code> tensors.`,name:"beam_indices"},{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.attentions",description:`<strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.`,name:"attentions"},{anchor:"transformers.generation_utils.BeamSampleDecoderOnlyOutput.hidden_states",description:`<strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, generated_length, hidden_size)</code>.`,name:"hidden_states"}]}}),vr=new b({props:{name:"class 
transformers.generation_utils.BeamSampleEncoderDecoderOutput",anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput",parameters:[{name:"sequences",val:": LongTensor = None"},{name:"sequences_scores",val:": typing.Optional[torch.FloatTensor] = None"},{name:"scores",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"beam_indices",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.LongTensor]]] = None"},{name:"encoder_attentions",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"encoder_hidden_states",val:": typing.Optional[typing.Tuple[torch.FloatTensor]] = None"},{name:"decoder_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"cross_attentions",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"},{name:"decoder_hidden_states",val:": typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L325",parametersDescription:[{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences",description:`<strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_beams, sequence_length)</code>) &#x2014; The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.`,name:"sequences"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences_scores",description:`<strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_return_sequence)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.`,name:"sequences_scores"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.scores",description:`<strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. <code>(max_length-1,)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_beams, config.vocab_size)</code>).`,name:"scores"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.beam_indices",description:`<strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. 
<code>(batch_size*num_return_sequences)</code>-shaped tuple of <code>(max_length-1,)</code>-shaped tuples of scalar <code>torch.LongTensor</code> tensors.`,name:"beam_indices"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_attentions",description:`<strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.`,name:"encoder_attentions"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_hidden_states",description:`<strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_beams, sequence_length, hidden_size)</code>.`,name:"encoder_hidden_states"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_attentions",description:`<strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.`,name:"decoder_attentions"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.cross_attentions",description:`<strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, 
<em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.`,name:"cross_attentions"},{anchor:"transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_hidden_states",description:`<strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, generated_length, hidden_size)</code>.`,name:"decoder_hidden_states"}]}}),br=new qe({}),$r=new b({props:{name:"class transformers.LogitsProcessor",anchor:"transformers.LogitsProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L51"}}),Tr=new b({props:{name:"__call__",anchor:"transformers.LogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L54",parametersDescription:[{anchor:"transformers.LogitsProcessor.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LogitsProcessor.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),yr=new b({props:{name:"class transformers.LogitsProcessorList",anchor:"transformers.LogitsProcessorList",parameters:[{name:"iterable",val:" = ()"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L73"}}),kr=new b({props:{name:"__call__",anchor:"transformers.LogitsProcessorList.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L80",parametersDescription:[{anchor:"transformers.LogitsProcessorList.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LogitsProcessorList.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),wr=new b({props:{name:"class transformers.LogitsWarper",anchor:"transformers.LogitsWarper",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L62"}}),Lr=new b({props:{name:"__call__",anchor:"transformers.LogitsWarper.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L65",parametersDescription:[{anchor:"transformers.LogitsWarper.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.LogitsWarper.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),xr=new b({props:{name:"class transformers.MinLengthLogitsProcessor",anchor:"transformers.MinLengthLogitsProcessor",parameters:[{name:"min_length",val:": int"},{name:"eos_token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L96",parametersDescription:[{anchor:"transformers.MinLengthLogitsProcessor.min_length",description:`<strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to <code>-float(&quot;Inf&quot;)</code>.`,name:"min_length"},{anchor:"transformers.MinLengthLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}]}}),Er=new 
b({props:{name:"__call__",anchor:"transformers.MinLengthLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L117"}}),Pr=new b({props:{name:"class transformers.TemperatureLogitsWarper",anchor:"transformers.TemperatureLogitsWarper",parameters:[{name:"temperature",val:": float"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L124",parametersDescription:[{anchor:"transformers.TemperatureLogitsWarper.temperature",description:`<strong>temperature</strong> (<code>float</code>) &#x2014; The value used to module the logits distribution.`,name:"temperature"}]}}),Fr=new b({props:{name:"__call__",anchor:"transformers.TemperatureLogitsWarper.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L139"}}),Dr=new b({props:{name:"class transformers.RepetitionPenaltyLogitsProcessor",anchor:"transformers.RepetitionPenaltyLogitsProcessor",parameters:[{name:"penalty",val:": float"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L144",parametersDescription:[{anchor:"transformers.RepetitionPenaltyLogitsProcessor.repetition_penalty",description:`<strong>repetition_penalty</strong> (<code>float</code>) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. 
See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.`,name:"repetition_penalty"}]}}),zr=new b({props:{name:"__call__",anchor:"transformers.RepetitionPenaltyLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L160"}}),Sr=new b({props:{name:"class transformers.TopPLogitsWarper",anchor:"transformers.TopPLogitsWarper",parameters:[{name:"top_p",val:": float"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L170",parametersDescription:[{anchor:"transformers.TopPLogitsWarper.top_p",description:`<strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.TopPLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.TopPLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}]}}),Or=new b({props:{name:"__call__",anchor:"transformers.TopPLogitsWarper.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L193"}}),qr=new b({props:{name:"class 
transformers.TopKLogitsWarper",anchor:"transformers.TopKLogitsWarper",parameters:[{name:"top_k",val:": int"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L212",parametersDescription:[{anchor:"transformers.TopKLogitsWarper.top_k",description:`<strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.`,name:"top_k"},{anchor:"transformers.TopKLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.TopKLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}]}}),Br=new b({props:{name:"__call__",anchor:"transformers.TopKLogitsWarper.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L233"}}),Ir=new b({props:{name:"class transformers.NoRepeatNGramLogitsProcessor",anchor:"transformers.NoRepeatNGramLogitsProcessor",parameters:[{name:"ngram_size",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L309",parametersDescription:[{anchor:"transformers.NoRepeatNGramLogitsProcessor.ngram_size",description:`<strong>ngram_size</strong> (<code>int</code>) &#x2014; All ngrams of size <code>ngram_size</code> can only occur once.`,name:"ngram_size"}]}}),Cr=new 
b({props:{name:"__call__",anchor:"transformers.NoRepeatNGramLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L324"}}),Nr=new b({props:{name:"class transformers.NoBadWordsLogitsProcessor",anchor:"transformers.NoBadWordsLogitsProcessor",parameters:[{name:"bad_words_ids",val:": typing.List[typing.List[int]]"},{name:"eos_token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L376",parametersDescription:[{anchor:"transformers.NoBadWordsLogitsProcessor.bad_words_ids",description:`<strong>bad_words_ids</strong> (<code>List[List[int]]</code>) &#x2014; List of list of token ids that are not allowed to be generated. In order to get the token ids of the words that should not appear in the generated text, use <code>tokenizer(bad_words, add_prefix_space=True, add_special_tokens=False).input_ids</code>.`,name:"bad_words_ids"},{anchor:"transformers.NoBadWordsLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}]}}),Wr=new b({props:{name:"__call__",anchor:"transformers.NoBadWordsLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L418"}}),Vr=new b({props:{name:"class transformers.PrefixConstrainedLogitsProcessor",anchor:"transformers.PrefixConstrainedLogitsProcessor",parameters:[{name:"prefix_allowed_tokens_fn",val:": typing.Callable[[int, torch.Tensor], typing.List[int]]"},{name:"num_beams",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L504"}}),Gr=new 
b({props:{name:"__call__",anchor:"transformers.PrefixConstrainedLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L521"}}),jr=new b({props:{name:"class transformers.HammingDiversityLogitsProcessor",anchor:"transformers.HammingDiversityLogitsProcessor",parameters:[{name:"diversity_penalty",val:": float"},{name:"num_beams",val:": int"},{name:"num_beam_groups",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L530",parametersDescription:[{anchor:"transformers.HammingDiversityLogitsProcessor.diversity_penalty",description:`<strong>diversity_penalty</strong> (<code>float</code>) &#x2014; This value is subtracted from a beam&#x2019;s score if it generates a token same as any beam from other group at a particular time. Note that <code>diversity_penalty</code> is only effective if <code>group beam search</code> is enabled.`,name:"diversity_penalty"},{anchor:"transformers.HammingDiversityLogitsProcessor.num_beams",description:`<strong>num_beams</strong> (<code>int</code>) &#x2014; Number of beams used for group beam search. See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beams"},{anchor:"transformers.HammingDiversityLogitsProcessor.num_beam_groups",description:`<strong>num_beam_groups</strong> (<code>int</code>) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. 
See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beam_groups"}]}}),Rr=new b({props:{name:"__call__",anchor:"transformers.HammingDiversityLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"current_tokens",val:": LongTensor"},{name:"beam_group_idx",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L561"}}),Kr=new b({props:{name:"class transformers.ForcedBOSTokenLogitsProcessor",anchor:"transformers.ForcedBOSTokenLogitsProcessor",parameters:[{name:"bos_token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L590",parametersDescription:[{anchor:"transformers.ForcedBOSTokenLogitsProcessor.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the first generated token.`,name:"bos_token_id"}]}}),Ur=new b({props:{name:"__call__",anchor:"transformers.ForcedBOSTokenLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L602"}}),Yr=new b({props:{name:"class transformers.ForcedEOSTokenLogitsProcessor",anchor:"transformers.ForcedEOSTokenLogitsProcessor",parameters:[{name:"max_length",val:": int"},{name:"eos_token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L611",parametersDescription:[{anchor:"transformers.ForcedEOSTokenLogitsProcessor.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be 
generated.`,name:"max_length"},{anchor:"transformers.ForcedEOSTokenLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.`,name:"eos_token_id"}]}}),Xr=new b({props:{name:"__call__",anchor:"transformers.ForcedEOSTokenLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L626"}}),Jr=new b({props:{name:"class transformers.InfNanRemoveLogitsProcessor",anchor:"transformers.InfNanRemoveLogitsProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L635"}}),Qr=new b({props:{name:"__call__",anchor:"transformers.InfNanRemoveLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L642"}}),Zr=new b({props:{name:"class transformers.TFLogitsProcessor",anchor:"transformers.TFLogitsProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L50"}}),eo=new b({props:{name:"__call__",anchor:"transformers.TFLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L53",parametersDescription:[{anchor:"transformers.TFLogitsProcessor.__call__.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLogitsProcessor.__call__.scores",description:`<strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),to=new b({props:{name:"class transformers.TFLogitsProcessorList",anchor:"transformers.TFLogitsProcessorList",parameters:[{name:"iterable",val:" = ()"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L72"}}),ro=new b({props:{name:"__call__",anchor:"transformers.TFLogitsProcessorList.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L79",parametersDescription:[{anchor:"transformers.TFLogitsProcessorList.__call__.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices 
can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLogitsProcessorList.__call__.scores",description:`<strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),oo=new b({props:{name:"class transformers.TFLogitsWarper",anchor:"transformers.TFLogitsWarper",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L61"}}),no=new b({props:{name:"__call__",anchor:"transformers.TFLogitsWarper.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L64",parametersDescription:[{anchor:"transformers.TFLogitsWarper.__call__.input_ids",description:`<strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.TFLogitsWarper.__call__.scores",description:`<strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),so=new b({props:{name:"class transformers.TFTemperatureLogitsWarper",anchor:"transformers.TFTemperatureLogitsWarper",parameters:[{name:"temperature",val:": float"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L95",parametersDescription:[{anchor:"transformers.TFTemperatureLogitsWarper.temperature",description:`<strong>temperature</strong> (<code>float</code>) &#x2014; The value used to module the logits distribution.`,name:"temperature"}]}}),ao=new b({props:{name:"__call__",anchor:"transformers.TFTemperatureLogitsWarper.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L110"}}),io=new b({props:{name:"class 
transformers.TFTopPLogitsWarper",anchor:"transformers.TFTopPLogitsWarper",parameters:[{name:"top_p",val:": float"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L144",parametersDescription:[{anchor:"transformers.TFTopPLogitsWarper.top_p",description:`<strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.TFTopPLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.TFTopPLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}]}}),co=new b({props:{name:"__call__",anchor:"transformers.TFTopPLogitsWarper.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L166"}}),lo=new b({props:{name:"class transformers.TFTopKLogitsWarper",anchor:"transformers.TFTopKLogitsWarper",parameters:[{name:"top_k",val:": int"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L115",parametersDescription:[{anchor:"transformers.TFTopKLogitsWarper.top_k",description:`<strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for 
top-k-filtering.`,name:"top_k"},{anchor:"transformers.TFTopKLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.TFTopKLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}]}}),po=new b({props:{name:"__call__",anchor:"transformers.TFTopKLogitsWarper.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L136"}}),fo=new b({props:{name:"class transformers.TFMinLengthLogitsProcessor",anchor:"transformers.TFMinLengthLogitsProcessor",parameters:[{name:"min_length",val:": int"},{name:"eos_token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L198",parametersDescription:[{anchor:"transformers.TFMinLengthLogitsProcessor.min_length",description:`<strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to <code>-float(&quot;Inf&quot;)</code>.`,name:"min_length"},{anchor:"transformers.TFMinLengthLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}]}}),mo=new b({props:{name:"__call__",anchor:"transformers.TFMinLengthLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L219"}}),ho=new b({props:{name:"class 
transformers.TFNoBadWordsLogitsProcessor",anchor:"transformers.TFNoBadWordsLogitsProcessor",parameters:[{name:"bad_words_ids",val:": typing.List[typing.List[int]]"},{name:"eos_token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L271",parametersDescription:[{anchor:"transformers.TFNoBadWordsLogitsProcessor.bad_words_ids",description:`<strong>bad_words_ids</strong> (<code>List[List[int]]</code>) &#x2014; List of list of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer(bad_word, add_prefix_space=True).input_ids</code>.`,name:"bad_words_ids"},{anchor:"transformers.TFNoBadWordsLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}]}}),go=new b({props:{name:"__call__",anchor:"transformers.TFNoBadWordsLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L334"}}),uo=new b({props:{name:"class transformers.TFNoRepeatNGramLogitsProcessor",anchor:"transformers.TFNoRepeatNGramLogitsProcessor",parameters:[{name:"ngram_size",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L354",parametersDescription:[{anchor:"transformers.TFNoRepeatNGramLogitsProcessor.ngram_size",description:`<strong>ngram_size</strong> (<code>int</code>) &#x2014; All ngrams of size <code>ngram_size</code> can only occur once.`,name:"ngram_size"}]}}),vo=new b({props:{name:"__call__",anchor:"transformers.TFNoRepeatNGramLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": 
Tensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L392"}}),bo=new b({props:{name:"class transformers.TFRepetitionPenaltyLogitsProcessor",anchor:"transformers.TFRepetitionPenaltyLogitsProcessor",parameters:[{name:"penalty",val:": float"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L233",parametersDescription:[{anchor:"transformers.TFRepetitionPenaltyLogitsProcessor.repetition_penalty",description:`<strong>repetition_penalty</strong> (<code>float</code>) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.`,name:"repetition_penalty"}]}}),$o=new b({props:{name:"__call__",anchor:"transformers.TFRepetitionPenaltyLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": Tensor"},{name:"scores",val:": Tensor"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L262"}}),To=new b({props:{name:"class transformers.FlaxLogitsProcessor",anchor:"transformers.FlaxLogitsProcessor",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L50"}}),yo=new b({props:{name:"__call__",anchor:"transformers.FlaxLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L53",parametersDescription:[{anchor:"transformers.FlaxLogitsProcessor.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a 
href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxLogitsProcessor.__call__.scores",description:`<strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),ko=new b({props:{name:"class transformers.FlaxLogitsProcessorList",anchor:"transformers.FlaxLogitsProcessorList",parameters:[{name:"iterable",val:" = ()"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L72"}}),wo=new b({props:{name:"__call__",anchor:"transformers.FlaxLogitsProcessorList.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L79",parametersDescription:[{anchor:"transformers.FlaxLogitsProcessorList.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input 
sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxLogitsProcessorList.__call__.scores",description:`<strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),Lo=new b({props:{name:"class transformers.FlaxLogitsWarper",anchor:"transformers.FlaxLogitsWarper",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L61"}}),xo=new b({props:{name:"__call__",anchor:"transformers.FlaxLogitsWarper.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L64",parametersDescription:[{anchor:"transformers.FlaxLogitsWarper.__call__.input_ids",description:`<strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the 
vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.FlaxLogitsWarper.__call__.scores",description:`<strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.`,name:"scores"}],returnDescription:` <p>The processed prediction scores.</p> `,returnType:` <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> `}}),Eo=new b({props:{name:"class transformers.FlaxTemperatureLogitsWarper",anchor:"transformers.FlaxTemperatureLogitsWarper",parameters:[{name:"temperature",val:": float"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L95",parametersDescription:[{anchor:"transformers.FlaxTemperatureLogitsWarper.temperature",description:`<strong>temperature</strong> (<code>float</code>) &#x2014; The value used to module the logits distribution.`,name:"temperature"}]}}),Po=new b({props:{name:"__call__",anchor:"transformers.FlaxTemperatureLogitsWarper.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": 
int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L110"}}),Fo=new b({props:{name:"class transformers.FlaxTopPLogitsWarper",anchor:"transformers.FlaxTopPLogitsWarper",parameters:[{name:"top_p",val:": float"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L115",parametersDescription:[{anchor:"transformers.FlaxTopPLogitsWarper.top_p",description:`<strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.`,name:"top_p"},{anchor:"transformers.FlaxTopPLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.FlaxTopPLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}]}}),Do=new b({props:{name:"__call__",anchor:"transformers.FlaxTopPLogitsWarper.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L137"}}),zo=new b({props:{name:"class transformers.FlaxTopKLogitsWarper",anchor:"transformers.FlaxTopKLogitsWarper",parameters:[{name:"top_k",val:": int"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 
1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L156",parametersDescription:[{anchor:"transformers.FlaxTopKLogitsWarper.top_k",description:`<strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.`,name:"top_k"},{anchor:"transformers.FlaxTopKLogitsWarper.filter_value",description:`<strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.`,name:"filter_value"},{anchor:"transformers.FlaxTopKLogitsWarper.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.`,name:"min_tokens_to_keep"}]}}),So=new b({props:{name:"__call__",anchor:"transformers.FlaxTopKLogitsWarper.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L177"}}),Oo=new b({props:{name:"class transformers.FlaxForcedBOSTokenLogitsProcessor",anchor:"transformers.FlaxForcedBOSTokenLogitsProcessor",parameters:[{name:"bos_token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L192",parametersDescription:[{anchor:"transformers.FlaxForcedBOSTokenLogitsProcessor.bos_token_id",description:`<strong>bos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the first generated token.`,name:"bos_token_id"}]}}),qo=new b({props:{name:"__call__",anchor:"transformers.FlaxForcedBOSTokenLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": 
int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L204"}}),Bo=new b({props:{name:"class transformers.FlaxForcedEOSTokenLogitsProcessor",anchor:"transformers.FlaxForcedEOSTokenLogitsProcessor",parameters:[{name:"max_length",val:": int"},{name:"eos_token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L216",parametersDescription:[{anchor:"transformers.FlaxForcedEOSTokenLogitsProcessor.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.FlaxForcedEOSTokenLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.`,name:"eos_token_id"}]}}),Io=new b({props:{name:"__call__",anchor:"transformers.FlaxForcedEOSTokenLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L231"}}),Ao=new b({props:{name:"class transformers.FlaxMinLengthLogitsProcessor",anchor:"transformers.FlaxMinLengthLogitsProcessor",parameters:[{name:"min_length",val:": int"},{name:"eos_token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L243",parametersDescription:[{anchor:"transformers.FlaxMinLengthLogitsProcessor.min_length",description:`<strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to 
<code>-float(&quot;Inf&quot;)</code>.`,name:"min_length"},{anchor:"transformers.FlaxMinLengthLogitsProcessor.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}]}}),Co=new b({props:{name:"__call__",anchor:"transformers.FlaxMinLengthLogitsProcessor.__call__",parameters:[{name:"input_ids",val:": ndarray"},{name:"scores",val:": ndarray"},{name:"cur_len",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L264"}}),No=new qe({}),Wo=new b({props:{name:"class transformers.StoppingCriteria",anchor:"transformers.StoppingCriteria",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L33"}}),Vo=new b({props:{name:"__call__",anchor:"transformers.StoppingCriteria.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L36",parametersDescription:[{anchor:"transformers.StoppingCriteria.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.StoppingCriteria.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs &#x2014; Additional stopping criteria specific kwargs.`,name:"scores"}],returnDescription:` <p><code>bool</code>. <code>False</code> indicates we should continue, <code>True</code> indicates we should stop.</p> `}}),Mo=new b({props:{name:"class transformers.StoppingCriteriaList",anchor:"transformers.StoppingCriteriaList",parameters:[{name:"iterable",val:" = ()"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L110"}}),Go=new b({props:{name:"__call__",anchor:"transformers.StoppingCriteriaList.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L111",parametersDescription:[{anchor:"transformers.StoppingCriteriaList.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.StoppingCriteriaList.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs &#x2014; Additional stopping criteria specific kwargs.`,name:"scores"}],returnDescription:` <p><code>bool</code>. <code>False</code> indicates we should continue, <code>True</code> indicates we should stop.</p> `}}),jo=new b({props:{name:"class transformers.MaxLengthCriteria",anchor:"transformers.MaxLengthCriteria",parameters:[{name:"max_length",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L41",parametersDescription:[{anchor:"transformers.MaxLengthCriteria.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length that the output sequence can have in number of tokens.`,name:"max_length"}]}}),Ro=new b({props:{name:"__call__",anchor:"transformers.MaxLengthCriteria.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L54",parametersDescription:[{anchor:"transformers.MaxLengthCriteria.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 
sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MaxLengthCriteria.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs &#x2014; Additional stopping criteria specific kwargs.`,name:"scores"}],returnDescription:` <p><code>bool</code>. 
<code>False</code> indicates we should continue, <code>True</code> indicates we should stop.</p> `}}),Ko=new b({props:{name:"class transformers.MaxTimeCriteria",anchor:"transformers.MaxTimeCriteria",parameters:[{name:"max_time",val:": float"},{name:"initial_timestamp",val:": typing.Optional[float] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L88",parametersDescription:[{anchor:"transformers.MaxTimeCriteria.max_time",description:`<strong>max_time</strong> (<code>float</code>) &#x2014; The maximum allowed time in seconds for the generation.`,name:"max_time"},{anchor:"transformers.MaxTimeCriteria.initial_time",description:`<strong>initial_time</strong> (<code>float</code>, <em>optional</em>, defaults to <code>time.time()</code>) &#x2014; The start of the generation allowed time.`,name:"initial_time"}]}}),Yo=new b({props:{name:"__call__",anchor:"transformers.MaxTimeCriteria.__call__",parameters:[{name:"input_ids",val:": LongTensor"},{name:"scores",val:": FloatTensor"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L105",parametersDescription:[{anchor:"transformers.MaxTimeCriteria.__call__.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.MaxTimeCriteria.__call__.scores",description:`<strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs &#x2014; Additional stopping criteria specific kwargs.`,name:"scores"}],returnDescription:` <p><code>bool</code>. <code>False</code> indicates we should continue, <code>True</code> indicates we should stop.</p> `}}),Xo=new qe({}),Jo=new b({props:{name:"class transformers.Constraint",anchor:"transformers.Constraint",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L5"}}),Qo=new m$({props:{code:`completed = False while not completed: _, completed = constraint.update(constraint.advance())`,highlighted:`completed = <span class="hljs-literal">False</span> <span class="hljs-keyword">while</span> <span class="hljs-keyword">not</span> completed: _, completed = constraint.update(constraint.advance())`}}),Zo=new b({props:{name:"advance",anchor:"transformers.Constraint.advance",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L48",returnDescription:` <p>Must be a tensor of a list of indexable tokens, not some integer.</p> `,returnType:` <p>token_ids(<code>torch.tensor</code>)</p> `}}),en=new 
b({props:{name:"copy",anchor:"transformers.Constraint.copy",parameters:[{name:"stateful",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L113",returnDescription:` <p>The same constraint as the one being called from.</p> `,returnType:` <p>constraint(<code>Constraint</code>)</p> `}}),tn=new b({props:{name:"does_advance",anchor:"transformers.Constraint.does_advance",parameters:[{name:"token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L60"}}),rn=new b({props:{name:"remaining",anchor:"transformers.Constraint.remaining",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L104"}}),nn=new b({props:{name:"reset",anchor:"transformers.Constraint.reset",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L94"}}),sn=new b({props:{name:"test",anchor:"transformers.Constraint.test",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L24"}}),an=new b({props:{name:"update",anchor:"transformers.Constraint.update",parameters:[{name:"token_id",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L69",returnDescription:` <p>Whether this constraint has become one step closer to being fulfuilled. completed(<code>bool</code>): Whether this constraint has been completely fulfilled by this token being generated. 
reset (<code>bool</code>): Whether this constraint has reset its progress by this token being generated.</p> `,returnType:` <p>stepped(<code>bool</code>)</p> `}}),ln=new b({props:{name:"class transformers.PhrasalConstraint",anchor:"transformers.PhrasalConstraint",parameters:[{name:"token_ids",val:": typing.List[int]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L129",parametersDescription:[{anchor:"transformers.PhrasalConstraint.token_ids",description:`<strong>token_ids</strong> (<code>List[int]</code>) &#x2014; The id of the token that must be generated by the output.`,name:"token_ids"}]}}),dn=new b({props:{name:"class transformers.DisjunctiveConstraint",anchor:"transformers.DisjunctiveConstraint",parameters:[{name:"nested_token_ids",val:": typing.List[typing.List[int]]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L260",parametersDescription:[{anchor:"transformers.DisjunctiveConstraint.nested_token_ids",description:"<strong>nested_token_ids</strong> (<code>List[List[int]]</code>) &#x2014; a list of words, where each word is a list of ids. This constraint",name:"nested_token_ids"},{anchor:"transformers.DisjunctiveConstraint.is",description:"<strong>is</strong> fulfilled by generating just one from the list of words. 
&#x2014;",name:"is"}]}}),fn=new b({props:{name:"class transformers.ConstraintListState",anchor:"transformers.ConstraintListState",parameters:[{name:"constraints",val:": typing.List[transformers.generation_beam_constraints.Constraint]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L349",parametersDescription:[{anchor:"transformers.ConstraintListState.constraints",description:`<strong>constraints</strong> (<code>List[Constraint]</code>) &#x2014; A list of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.Constraint">Constraint</a> objects that must be fulfilled by the beam scorer.`,name:"constraints"}]}}),mn=new b({props:{name:"advance",anchor:"transformers.ConstraintListState.advance",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L381"}}),hn=new b({props:{name:"reset",anchor:"transformers.ConstraintListState.reset",parameters:[{name:"token_ids",val:": typing.Optional[typing.List[int]]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L416"}}),gn=new qe({}),un=new b({props:{name:"class transformers.BeamScorer",anchor:"transformers.BeamScorer",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L88"}}),_n=new b({props:{name:"process",anchor:"transformers.BeamScorer.process",parameters:[{name:"input_ids",val:": LongTensor"},{name:"next_scores",val:": FloatTensor"},{name:"next_tokens",val:": LongTensor"},{name:"next_indices",val:": LongTensor"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L94",parametersDescription:[{anchor:"transformers.BeamScorer.process.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * 
num_beams, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using any class inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BeamScorer.process.next_scores",description:`<strong>next_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Current scores of the top <code>2 * num_beams</code> non-finished beam hypotheses.`,name:"next_scores"},{anchor:"transformers.BeamScorer.process.next_tokens",description:`<strong>next_tokens</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; <code>input_ids</code> of the tokens corresponding to the top <code>2 * num_beams</code> non-finished beam hypotheses.`,name:"next_tokens"},{anchor:"transformers.BeamScorer.process.next_indices",description:`<strong>next_indices</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Beam indices indicating to which beam hypothesis the <code>next_tokens</code> correspond.`,name:"next_indices"},{anchor:"transformers.BeamScorer.process.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.BeamScorer.process.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the 
<em>end-of-sequence</em> token.`,name:"eos_token_id"}],returnDescription:` <p>A dictionary composed of the fields as defined above:</p> <ul> <li><strong>next_beam_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Updated scores of all non-finished beams.</li> <li><strong>next_beam_tokens</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Next tokens to be added to the non-finished beam_hypotheses.</li> <li><strong>next_beam_indices</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Beam indices indicating to which beam the next tokens shall be added.</li> </ul> `,returnType:` <p><code>UserDict</code></p> `}}),vn=new b({props:{name:"finalize",anchor:"transformers.BeamScorer.finalize",parameters:[{name:"input_ids",val:": LongTensor"},{name:"next_scores",val:": FloatTensor"},{name:"next_tokens",val:": LongTensor"},{name:"next_indices",val:": LongTensor"},{name:"max_length",val:": int"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L106",parametersDescription:[{anchor:"transformers.BeamScorer.finalize.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using any class inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.BeamScorer.finalize.final_beam_scores",description:`<strong>final_beam_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) &#x2014; The final scores of all non-finished beams.`,name:"final_beam_scores"},{anchor:"transformers.BeamScorer.finalize.final_beam_tokens",description:`<strong>final_beam_tokens</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) &#x2014; The last tokens to be added to the non-finished beam_hypotheses.`,name:"final_beam_tokens"},{anchor:"transformers.BeamScorer.finalize.final_beam_indices",description:`<strong>final_beam_indices</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) &#x2014; The beam indices indicating to which beam the <code>final_beam_tokens</code> shall be added.`,name:"final_beam_indices"},{anchor:"transformers.BeamScorer.finalize.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.`,name:"pad_token_id"},{anchor:"transformers.BeamScorer.finalize.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}],returnDescription:` <p>The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.</p> `,returnType:` <p><code>torch.LongTensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></p> `}}),bn=new b({props:{name:"class transformers.BeamSearchScorer",anchor:"transformers.BeamSearchScorer",parameters:[{name:"batch_size",val:": int"},{name:"num_beams",val:": int"},{name:"device",val:": device"},{name:"length_penalty",val:": typing.Optional[float] = 1.0"},{name:"do_early_stopping",val:": typing.Optional[bool] = False"},{name:"num_beam_hyps_to_keep",val:": typing.Optional[int] = 1"},{name:"num_beam_groups",val:": typing.Optional[int] = 1"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L120",parametersDescription:[{anchor:"transformers.BeamSearchScorer.batch_size",description:`<strong>batch_size</strong> (<code>int</code>) &#x2014; Batch Size of <code>input_ids</code> for which standard beam search decoding is run in parallel.`,name:"batch_size"},{anchor:"transformers.BeamSearchScorer.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.BeamSearchScorer.num_beams",description:`<strong>num_beams</strong> (<code>int</code>) &#x2014; Number of beams for beam search.`,name:"num_beams"},{anchor:"transformers.BeamSearchScorer.device",description:`<strong>device</strong> (<code>torch.device</code>) &#x2014; Defines the device type (<em>e.g.</em>, <code>&quot;cpu&quot;</code> or <code>&quot;cuda&quot;</code>) on which this instance of <code>BeamSearchScorer</code> will be allocated.`,name:"device"},{anchor:"transformers.BeamSearchScorer.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; 
Exponential penalty to the length. 1.0 means no penalty. Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.`,name:"length_penalty"},{anchor:"transformers.BeamSearchScorer.do_early_stopping",description:`<strong>do_early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.`,name:"do_early_stopping"},{anchor:"transformers.BeamSearchScorer.num_beam_hyps_to_keep",description:`<strong>num_beam_hyps_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of beam hypotheses that shall be returned upon calling <code>finalize</code>.`,name:"num_beam_hyps_to_keep"},{anchor:"transformers.BeamSearchScorer.num_beam_groups",description:`<strong>num_beam_groups</strong> (<code>int</code>) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. 
See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beam_groups"}]}}),kn=new b({props:{name:"process",anchor:"transformers.BeamSearchScorer.process",parameters:[{name:"input_ids",val:": LongTensor"},{name:"next_scores",val:": FloatTensor"},{name:"next_tokens",val:": LongTensor"},{name:"next_indices",val:": LongTensor"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L206"}}),wn=new b({props:{name:"finalize",anchor:"transformers.BeamSearchScorer.finalize",parameters:[{name:"input_ids",val:": LongTensor"},{name:"final_beam_scores",val:": FloatTensor"},{name:"final_beam_tokens",val:": LongTensor"},{name:"final_beam_indices",val:": LongTensor"},{name:"max_length",val:": int"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L291"}}),Ln=new b({props:{name:"class transformers.ConstrainedBeamSearchScorer",anchor:"transformers.ConstrainedBeamSearchScorer",parameters:[{name:"batch_size",val:": int"},{name:"num_beams",val:": int"},{name:"constraints",val:": typing.List[transformers.generation_beam_constraints.Constraint]"},{name:"device",val:": device"},{name:"length_penalty",val:": typing.Optional[float] = 1.0"},{name:"do_early_stopping",val:": typing.Optional[bool] = False"},{name:"num_beam_hyps_to_keep",val:": typing.Optional[int] = 1"},{name:"num_beam_groups",val:": typing.Optional[int] = 1"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L356",parametersDescription:[{anchor:"transformers.ConstrainedBeamSearchScorer.batch_size",description:`<strong>batch_size</strong> 
(<code>int</code>) &#x2014; Batch Size of <code>input_ids</code> for which standard beam search decoding is run in parallel.`,name:"batch_size"},{anchor:"transformers.ConstrainedBeamSearchScorer.max_length",description:`<strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.`,name:"max_length"},{anchor:"transformers.ConstrainedBeamSearchScorer.num_beams",description:`<strong>num_beams</strong> (<code>int</code>) &#x2014; Number of beams for beam search.`,name:"num_beams"},{anchor:"transformers.ConstrainedBeamSearchScorer.constraints",description:`<strong>constraints</strong> (<code>List[Constraint]</code>) &#x2014; A list of positive constraints represented as <code>Constraint</code> objects that must be fulfilled in the generation output. For more information, the documentation of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.Constraint">Constraint</a> should be read.`,name:"constraints"},{anchor:"transformers.ConstrainedBeamSearchScorer.device",description:`<strong>device</strong> (<code>torch.device</code>) &#x2014; Defines the device type (<em>e.g.</em>, <code>&quot;cpu&quot;</code> or <code>&quot;cuda&quot;</code>) on which this instance of <code>BeamSearchScorer</code> will be allocated.`,name:"device"},{anchor:"transformers.ConstrainedBeamSearchScorer.length_penalty",description:`<strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length. 1.0 means no penalty. 
Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.`,name:"length_penalty"},{anchor:"transformers.ConstrainedBeamSearchScorer.do_early_stopping",description:`<strong>do_early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.`,name:"do_early_stopping"},{anchor:"transformers.ConstrainedBeamSearchScorer.num_beam_hyps_to_keep",description:`<strong>num_beam_hyps_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of beam hypotheses that shall be returned upon calling <code>finalize</code>.`,name:"num_beam_hyps_to_keep"},{anchor:"transformers.ConstrainedBeamSearchScorer.num_beam_groups",description:`<strong>num_beam_groups</strong> (<code>int</code>) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. 
See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.`,name:"num_beam_groups"}]}}),xn=new b({props:{name:"process",anchor:"transformers.ConstrainedBeamSearchScorer.process",parameters:[{name:"input_ids",val:": LongTensor"},{name:"next_scores",val:": FloatTensor"},{name:"next_tokens",val:": LongTensor"},{name:"next_indices",val:": LongTensor"},{name:"scores_for_all_vocab",val:": FloatTensor"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L450",parametersDescription:[{anchor:"transformers.ConstrainedBeamSearchScorer.process.input_ids",description:`<strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using any class inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a>`,name:"input_ids"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.next_scores",description:`<strong>next_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Current scores of the top <code>2 * num_beams</code> non-finished beam hypotheses.`,name:"next_scores"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.next_tokens",description:`<strong>next_tokens</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; <code>input_ids</code> of the tokens corresponding to the top <code>2 * num_beams</code> non-finished beam hypotheses.`,name:"next_tokens"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.next_indices",description:`<strong>next_indices</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Beam indices indicating to which beam hypothesis the <code>next_tokens</code> correspond.`,name:"next_indices"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.scores_for_all_vocab",description:`<strong>scores_for_all_vocab</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; The scores of all tokens in the vocabulary for each of the beam hypotheses.`,name:"scores_for_all_vocab"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.pad_token_id",description:`<strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> 
token.`,name:"pad_token_id"},{anchor:"transformers.ConstrainedBeamSearchScorer.process.eos_token_id",description:`<strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.`,name:"eos_token_id"}],returnDescription:` <p>A dictionary composed of the fields as defined above:</p> <ul> <li> <p><strong>next_beam_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Updated scores of all non-finished beams.</p> </li> <li> <p><strong>next_beam_tokens</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Next tokens to be added to the non-finished beam_hypotheses.</p> </li> <li> <p><strong>next_beam_indices</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) \u2014 Beam indices indicating to which beam the next tokens shall be added.</p> </li> </ul> `,returnType:` <p><code>UserDict</code></p> `}}),En=new b({props:{name:"finalize",anchor:"transformers.ConstrainedBeamSearchScorer.finalize",parameters:[{name:"input_ids",val:": LongTensor"},{name:"final_beam_scores",val:": FloatTensor"},{name:"final_beam_tokens",val:": LongTensor"},{name:"final_beam_indices",val:": LongTensor"},{name:"max_length",val:": int"},{name:"pad_token_id",val:": typing.Optional[int] = None"},{name:"eos_token_id",val:": typing.Optional[int] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L725"}}),Pn=new qe({}),Fn=new b({props:{name:"transformers.top_k_top_p_filtering",anchor:"transformers.top_k_top_p_filtering",parameters:[{name:"logits",val:": FloatTensor"},{name:"top_k",val:": int = 0"},{name:"top_p",val:": float = 1.0"},{name:"filter_value",val:": float = -inf"},{name:"min_tokens_to_keep",val:": int = 
1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L3290",parametersDescription:[{anchor:"transformers.top_k_top_p_filtering.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If &gt; 0, only keep the top k tokens with highest probability (top-k filtering)`,name:"top_k"},{anchor:"transformers.top_k_top_p_filtering.top_p",description:`<strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If &lt; 1.0, only keep the top tokens with cumulative probability &gt;= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (<a href="http://arxiv.org/abs/1904.09751" rel="nofollow">http://arxiv.org/abs/1904.09751</a>)`,name:"top_p"},{anchor:"transformers.top_k_top_p_filtering.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimumber of tokens we keep per batch example in the output.`,name:"min_tokens_to_keep"}]}}),zn=new b({props:{name:"transformers.tf_top_k_top_p_filtering",anchor:"transformers.tf_top_k_top_p_filtering",parameters:[{name:"logits",val:""},{name:"top_k",val:" = 0"},{name:"top_p",val:" = 1.0"},{name:"filter_value",val:" = -inf"},{name:"min_tokens_to_keep",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_utils.py#L2270",parametersDescription:[{anchor:"transformers.tf_top_k_top_p_filtering.top_k",description:`<strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If &gt; 0, only keep the top k tokens with highest probability (top-k filtering)`,name:"top_k"},{anchor:"transformers.tf_top_k_top_p_filtering.top_p",description:`<strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If &lt; 1.0, only keep the top tokens with cumulative probability &gt;= top_p (nucleus filtering). 
Nucleus filtering is described in Holtzman et al. (<a href="http://arxiv.org/abs/1904.09751" rel="nofollow">http://arxiv.org/abs/1904.09751</a>)`,name:"top_p"},{anchor:"transformers.tf_top_k_top_p_filtering.min_tokens_to_keep",description:`<strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimumber of tokens we keep per batch example in the output.`,name:"min_tokens_to_keep"}]}}),{c(){Be=o("meta"),qn=c(),N=o("h1"),H=o("a"),Gi=o("span"),m(Zt.$$.fragment),kf=c(),ji=o("span"),wf=a("Utilities for Generation"),Jl=c(),y=o("p"),Lf=a("This page lists all the utility functions used by "),Bn=o("a"),xf=a("generate()"),Ef=a(`, `),In=o("a"),Pf=a("greedy_search()"),Ff=a(`, `),An=o("a"),Df=a("sample()"),zf=a(`, `),Cn=o("a"),Sf=a("beam_search()"),Of=a(`, `),Nn=o("a"),qf=a("beam_sample()"),Bf=a(`, `),Wn=o("a"),If=a("group_beam_search()"),Af=a(`, and `),Vn=o("a"),Cf=a("constrained_beam_search()"),Nf=a("."),Ql=c(),Mn=o("p"),Wf=a("Most of those are only useful if you are studying the code of the generate methods in the library."),Zl=c(),Ge=o("h2"),mt=o("a"),Hi=o("span"),m(er.$$.fragment),Vf=c(),Ri=o("span"),Mf=a("Generate Outputs"),ed=c(),W=o("p"),Gf=a("The output of "),Gn=o("a"),jf=a("generate()"),Hf=a(` is an instance of a subclass of `),jn=o("a"),Rf=a("ModelOutput"),Kf=a(`. 
This output is a data structure containing all the information returned by `),Hn=o("a"),Uf=a("generate()"),Yf=a(", but that can also be used as tuple or dictionary."),td=c(),Rn=o("p"),Xf=a("Here\u2019s an example:"),rd=c(),m(tr.$$.fragment),od=c(),Ie=o("p"),Jf=a("The "),Ki=o("code"),Qf=a("generation_output"),Zf=a(" object is a "),Kn=o("a"),em=a("GreedySearchDecoderOnlyOutput"),tm=a(`, as we can see in the documentation of that class below, it means it has the following attributes:`),nd=c(),V=o("ul"),Un=o("li"),Ui=o("code"),rm=a("sequences"),om=a(": the generated sequences of tokens"),nm=c(),Yn=o("li"),Yi=o("code"),sm=a("scores"),am=a(" (optional): the prediction scores of the language modelling head, for each generation step"),im=c(),Xn=o("li"),Xi=o("code"),cm=a("hidden_states"),lm=a(" (optional): the hidden states of the model, for each generation step"),dm=c(),Jn=o("li"),Ji=o("code"),pm=a("attentions"),fm=a(" (optional): the attention weights of the model, for each generation step"),sd=c(),k=o("p"),mm=a("Here we have the "),Qi=o("code"),hm=a("scores"),gm=a(" since we passed along "),Zi=o("code"),um=a("output_scores=True"),_m=a(", but we don\u2019t have "),ec=o("code"),vm=a("hidden_states"),bm=a(` and `),tc=o("code"),$m=a("attentions"),Tm=a(" because we didn\u2019t pass "),rc=o("code"),ym=a("output_hidden_states=True"),km=a(" or "),oc=o("code"),wm=a("output_attentions=True"),Lm=a("."),ad=c(),F=o("p"),xm=a(`You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get `),nc=o("code"),Em=a("None"),Pm=a(". 
Here for instance "),sc=o("code"),Fm=a("generation_output.scores"),Dm=a(` are all the generated prediction scores of the language modeling head, and `),ac=o("code"),zm=a("generation_output.attentions"),Sm=a(" is "),ic=o("code"),Om=a("None"),qm=a("."),id=c(),D=o("p"),Bm=a("When using our "),cc=o("code"),Im=a("generation_output"),Am=a(" object as a tuple, it only keeps the attributes that don\u2019t have "),lc=o("code"),Cm=a("None"),Nm=a(` values. Here, for instance, it has two elements, `),dc=o("code"),Wm=a("loss"),Vm=a(" then "),pc=o("code"),Mm=a("logits"),Gm=a(", so"),cd=c(),m(rr.$$.fragment),ld=c(),ht=o("p"),jm=a("will return the tuple "),fc=o("code"),Hm=a("(generation_output.sequences, generation_output.scores)"),Rm=a(" for instance."),dd=c(),z=o("p"),Km=a("When using our "),mc=o("code"),Um=a("generation_output"),Ym=a(" object as a dictionary, it only keeps the attributes that don\u2019t have "),hc=o("code"),Xm=a("None"),Jm=a(` values. Here, for instance, it has two keys that are `),gc=o("code"),Qm=a("sequences"),Zm=a(" and "),uc=o("code"),eh=a("scores"),th=a("."),pd=c(),Qn=o("p"),rh=a("We document here all output types."),fd=c(),je=o("h3"),gt=o("a"),_c=o("span"),m(or.$$.fragment),oh=c(),vc=o("span"),nh=a("GreedySearchOutput"),md=c(),He=o("div"),m(nr.$$.fragment),sh=c(),bc=o("p"),ah=a("Base class for outputs of decoder-only generation models using greedy search."),hd=c(),Re=o("div"),m(sr.$$.fragment),ih=c(),$c=o("p"),ch=a(`Base class for outputs of encoder-decoder generation models using greedy search. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),gd=c(),R=o("div"),m(ar.$$.fragment),lh=c(),Tc=o("p"),dh=a("Flax Base class for outputs of decoder-only generation models using greedy search."),ph=c(),ut=o("div"),m(ir.$$.fragment),fh=c(),yc=o("p"),mh=a("\u201CReturns a new object replacing the specified fields with new values."),ud=c(),Ke=o("h3"),_t=o("a"),kc=o("span"),m(cr.$$.fragment),hh=c(),wc=o("span"),gh=a("SampleOutput"),_d=c(),Ue=o("div"),m(lr.$$.fragment),uh=c(),Lc=o("p"),_h=a("Base class for outputs of decoder-only generation models using sampling."),vd=c(),Ye=o("div"),m(dr.$$.fragment),vh=c(),xc=o("p"),bh=a(`Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),bd=c(),K=o("div"),m(pr.$$.fragment),$h=c(),Ec=o("p"),Th=a("Flax Base class for outputs of decoder-only generation models using sampling."),yh=c(),vt=o("div"),m(fr.$$.fragment),kh=c(),Pc=o("p"),wh=a("\u201CReturns a new object replacing the specified fields with new values."),$d=c(),Xe=o("h3"),bt=o("a"),Fc=o("span"),m(mr.$$.fragment),Lh=c(),Dc=o("span"),xh=a("BeamSearchOutput"),Td=c(),Je=o("div"),m(hr.$$.fragment),Eh=c(),zc=o("p"),Ph=a("Base class for outputs of decoder-only generation models using beam search."),yd=c(),Qe=o("div"),m(gr.$$.fragment),Fh=c(),Sc=o("p"),Dh=a(`Base class for outputs of encoder-decoder generation models using beam search. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),kd=c(),Ze=o("h3"),$t=o("a"),Oc=o("span"),m(ur.$$.fragment),zh=c(),qc=o("span"),Sh=a("BeamSampleOutput"),wd=c(),et=o("div"),m(_r.$$.fragment),Oh=c(),Bc=o("p"),qh=a("Base class for outputs of decoder-only generation models using beam sample."),Ld=c(),tt=o("div"),m(vr.$$.fragment),Bh=c(),Ic=o("p"),Ih=a(`Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),xd=c(),rt=o("h2"),Tt=o("a"),Ac=o("span"),m(br.$$.fragment),Ah=c(),Cc=o("span"),Ch=a("LogitsProcessor"),Ed=c(),yt=o("p"),Nh=a("A "),Zn=o("a"),Wh=a("LogitsProcessor"),Vh=a(` can be used to modify the prediction scores of a language model head for generation.`),Pd=c(),U=o("div"),m($r.$$.fragment),Mh=c(),Nc=o("p"),Gh=a("Abstract base class for all logit processors that can be applied during generation."),jh=c(),kt=o("div"),m(Tr.$$.fragment),Hh=c(),Wc=o("p"),Rh=a("Torch method for processing logits."),Fd=c(),Y=o("div"),m(yr.$$.fragment),Kh=c(),w=o("p"),Uh=a("This class can be used to create a list of "),es=o("a"),Yh=a("LogitsProcessor"),Xh=a(" or "),ts=o("a"),Jh=a("LogitsWarper"),Qh=a(` to subsequently process a `),Vc=o("code"),Zh=a("scores"),eg=a(" input tensor. 
This class inherits from list and adds a specific "),Mc=o("em"),Gc=o("strong"),tg=a("call"),rg=a(` method to apply each `),rs=o("a"),og=a("LogitsProcessor"),ng=a(" or "),os=o("a"),sg=a("LogitsWarper"),ag=a(" to the inputs."),ig=c(),ns=o("div"),m(kr.$$.fragment),Dd=c(),X=o("div"),m(wr.$$.fragment),cg=c(),jc=o("p"),lg=a("Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),dg=c(),wt=o("div"),m(Lr.$$.fragment),pg=c(),Hc=o("p"),fg=a("Torch method for warping logits."),zd=c(),J=o("div"),m(xr.$$.fragment),mg=c(),ss=o("p"),as=o("a"),hg=a("LogitsProcessor"),gg=a(" enforcing a min-length by setting EOS probability to 0."),ug=c(),is=o("div"),m(Er.$$.fragment),Sd=c(),Q=o("div"),m(Pr.$$.fragment),_g=c(),cs=o("p"),ls=o("a"),vg=a("LogitsWarper"),bg=a(" for temperature (exponential scaling output probability distribution)."),$g=c(),ds=o("div"),m(Fr.$$.fragment),Od=c(),Z=o("div"),m(Dr.$$.fragment),Tg=c(),ps=o("p"),fs=o("a"),yg=a("LogitsProcessor"),kg=a(" enforcing an exponential penalty on repeated sequences."),wg=c(),ms=o("div"),m(zr.$$.fragment),qd=c(),ee=o("div"),m(Sr.$$.fragment),Lg=c(),hs=o("p"),gs=o("a"),xg=a("LogitsWarper"),Eg=a(" that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off."),Pg=c(),us=o("div"),m(Or.$$.fragment),Bd=c(),te=o("div"),m(qr.$$.fragment),Fg=c(),_s=o("p"),vs=o("a"),Dg=a("LogitsWarper"),zg=a(" that performs top-k, i.e. restricting to the k highest probability elements."),Sg=c(),bs=o("div"),m(Br.$$.fragment),Id=c(),re=o("div"),m(Ir.$$.fragment),Og=c(),Lt=o("p"),$s=o("a"),qg=a("LogitsProcessor"),Bg=a(` that enforces no repetition of n-grams. 
See `),Ar=o("a"),Ig=a("Fairseq"),Ag=a("."),Cg=c(),Ts=o("div"),m(Cr.$$.fragment),Ad=c(),oe=o("div"),m(Nr.$$.fragment),Ng=c(),ys=o("p"),ks=o("a"),Wg=a("LogitsProcessor"),Vg=a(" that enforces that specified sequences will never be sampled."),Mg=c(),ws=o("div"),m(Wr.$$.fragment),Cd=c(),ne=o("div"),m(Vr.$$.fragment),Gg=c(),xt=o("p"),Ls=o("a"),jg=a("LogitsProcessor"),Hg=a(` that enforces constrained generation and is useful for prefix-conditioned constrained generation. See `),Mr=o("a"),Rg=a("Autoregressive Entity Retrieval"),Kg=a(" for more information."),Ug=c(),xs=o("div"),m(Gr.$$.fragment),Nd=c(),se=o("div"),m(jr.$$.fragment),Yg=c(),Ae=o("p"),Es=o("a"),Xg=a("LogitsProcessor"),Jg=a(` that enforces diverse beam search. Note that this logits processor is only effective for `),Ps=o("a"),Qg=a("PreTrainedModel.group_beam_search()"),Zg=a(". See "),Hr=o("a"),eu=a(`Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models`),tu=a(" for more details."),ru=c(),Fs=o("div"),m(Rr.$$.fragment),Wd=c(),ae=o("div"),m(Kr.$$.fragment),ou=c(),Ds=o("p"),zs=o("a"),nu=a("LogitsProcessor"),su=a(" that enforces the specified token as the first generated token."),au=c(),Ss=o("div"),m(Ur.$$.fragment),Vd=c(),ie=o("div"),m(Yr.$$.fragment),iu=c(),Et=o("p"),Os=o("a"),cu=a("LogitsProcessor"),lu=a(" that enforces the specified token as the last generated token when "),Rc=o("code"),du=a("max_length"),pu=a(" is reached."),fu=c(),qs=o("div"),m(Xr.$$.fragment),Md=c(),ce=o("div"),m(Jr.$$.fragment),mu=c(),M=o("p"),Bs=o("a"),hu=a("LogitsProcessor"),gu=a(" that removes all "),Kc=o("code"),uu=a("nan"),_u=a(" and "),Uc=o("code"),vu=a("inf"),bu=a(` values to avoid the generation method to fail. Note that using the logits processor should only be used if necessary since it can slow down the generation method. 
`),Yc=o("code"),$u=a("max_length"),Tu=a(` is reached.`),yu=c(),Is=o("div"),m(Qr.$$.fragment),Gd=c(),le=o("div"),m(Zr.$$.fragment),ku=c(),Xc=o("p"),wu=a("Abstract base class for all logit processors that can be applied during generation."),Lu=c(),Pt=o("div"),m(eo.$$.fragment),xu=c(),Jc=o("p"),Eu=a("TF method for processing logits."),jd=c(),de=o("div"),m(to.$$.fragment),Pu=c(),B=o("p"),Fu=a("This class can be used to create a list of "),As=o("a"),Du=a("TFLogitsProcessor"),zu=a(" to subsequently process a "),Qc=o("code"),Su=a("scores"),Ou=a(` input tensor. This class inherits from list and adds a specific `),Zc=o("em"),el=o("strong"),qu=a("call"),Bu=a(" method to apply each "),Cs=o("a"),Iu=a("TFLogitsProcessor"),Au=a(` to the inputs.`),Cu=c(),Ns=o("div"),m(ro.$$.fragment),Hd=c(),pe=o("div"),m(oo.$$.fragment),Nu=c(),tl=o("p"),Wu=a("Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),Vu=c(),Ft=o("div"),m(no.$$.fragment),Mu=c(),rl=o("p"),Gu=a("TF method for warping logits."),Rd=c(),fe=o("div"),m(so.$$.fragment),ju=c(),Ws=o("p"),Vs=o("a"),Hu=a("TFLogitsWarper"),Ru=a(" for temperature (exponential scaling output probability distribution)."),Ku=c(),Ms=o("div"),m(ao.$$.fragment),Kd=c(),me=o("div"),m(io.$$.fragment),Uu=c(),Gs=o("p"),js=o("a"),Yu=a("TFLogitsWarper"),Xu=a(" that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off."),Ju=c(),Hs=o("div"),m(co.$$.fragment),Ud=c(),he=o("div"),m(lo.$$.fragment),Qu=c(),Rs=o("p"),Ks=o("a"),Zu=a("TFLogitsWarper"),e_=a(" that performs top-k, i.e. 
restricting to the k highest probability elements."),t_=c(),Us=o("div"),m(po.$$.fragment),Yd=c(),ge=o("div"),m(fo.$$.fragment),r_=c(),Ys=o("p"),Xs=o("a"),o_=a("TFLogitsProcessor"),n_=a(" enforcing a min-length by setting EOS probability to 0."),s_=c(),Js=o("div"),m(mo.$$.fragment),Xd=c(),ue=o("div"),m(ho.$$.fragment),a_=c(),Qs=o("p"),Zs=o("a"),i_=a("TFLogitsProcessor"),c_=a(" that enforces that specified sequences will never be sampled."),l_=c(),ea=o("div"),m(go.$$.fragment),Jd=c(),_e=o("div"),m(uo.$$.fragment),d_=c(),Dt=o("p"),ta=o("a"),p_=a("TFLogitsProcessor"),f_=a(` that enforces no repetition of n-grams. See `),_o=o("a"),m_=a("Fairseq"),h_=a("."),g_=c(),ra=o("div"),m(vo.$$.fragment),Qd=c(),ve=o("div"),m(bo.$$.fragment),u_=c(),oa=o("p"),na=o("a"),__=a("TFLogitsProcessor"),v_=a(" enforcing an exponential penalty on repeated sequences."),b_=c(),sa=o("div"),m($o.$$.fragment),Zd=c(),be=o("div"),m(To.$$.fragment),$_=c(),ol=o("p"),T_=a("Abstract base class for all logit processors that can be applied during generation."),y_=c(),zt=o("div"),m(yo.$$.fragment),k_=c(),nl=o("p"),w_=a("Flax method for processing logits."),ep=c(),$e=o("div"),m(ko.$$.fragment),L_=c(),L=o("p"),x_=a("This class can be used to create a list of "),aa=o("a"),E_=a("FlaxLogitsProcessor"),P_=a(" or "),ia=o("a"),F_=a("FlaxLogitsWarper"),D_=a(` to subsequently process a `),sl=o("code"),z_=a("scores"),S_=a(" input tensor. 
This class inherits from list and adds a specific "),al=o("em"),il=o("strong"),O_=a("call"),q_=a(` method to apply each `),ca=o("a"),B_=a("FlaxLogitsProcessor"),I_=a(" or "),la=o("a"),A_=a("FlaxLogitsWarper"),C_=a(" to the inputs."),N_=c(),da=o("div"),m(wo.$$.fragment),tp=c(),Te=o("div"),m(Lo.$$.fragment),W_=c(),cl=o("p"),V_=a("Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),M_=c(),St=o("div"),m(xo.$$.fragment),G_=c(),ll=o("p"),j_=a("Flax method for warping logits."),rp=c(),ye=o("div"),m(Eo.$$.fragment),H_=c(),pa=o("p"),fa=o("a"),R_=a("FlaxLogitsWarper"),K_=a(" for temperature (exponential scaling output probability distribution)."),U_=c(),ma=o("div"),m(Po.$$.fragment),op=c(),ke=o("div"),m(Fo.$$.fragment),Y_=c(),ha=o("p"),ga=o("a"),X_=a("FlaxLogitsWarper"),J_=a(" that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off."),Q_=c(),ua=o("div"),m(Do.$$.fragment),np=c(),we=o("div"),m(zo.$$.fragment),Z_=c(),_a=o("p"),va=o("a"),ev=a("FlaxLogitsWarper"),tv=a(" that performs top-k, i.e. 
restricting to the k highest probability elements."),rv=c(),ba=o("div"),m(So.$$.fragment),sp=c(),Le=o("div"),m(Oo.$$.fragment),ov=c(),$a=o("p"),Ta=o("a"),nv=a("FlaxLogitsProcessor"),sv=a(" that enforces the specified token as the first generated token."),av=c(),ya=o("div"),m(qo.$$.fragment),ap=c(),xe=o("div"),m(Bo.$$.fragment),iv=c(),Ot=o("p"),ka=o("a"),cv=a("FlaxLogitsProcessor"),lv=a(" that enforces the specified token as the last generated token when "),dl=o("code"),dv=a("max_length"),pv=a(" is reached."),fv=c(),wa=o("div"),m(Io.$$.fragment),ip=c(),Ee=o("div"),m(Ao.$$.fragment),mv=c(),La=o("p"),xa=o("a"),hv=a("FlaxLogitsProcessor"),gv=a(" enforcing a min-length by setting EOS probability to 0."),uv=c(),Ea=o("div"),m(Co.$$.fragment),cp=c(),ot=o("h2"),qt=o("a"),pl=o("span"),m(No.$$.fragment),_v=c(),fl=o("span"),vv=a("StoppingCriteria"),lp=c(),Bt=o("p"),bv=a("A "),Pa=o("a"),$v=a("StoppingCriteria"),Tv=a(" can be used to change when to stop generation (other than EOS token)."),dp=c(),Pe=o("div"),m(Wo.$$.fragment),yv=c(),ml=o("p"),kv=a("Abstract base class for all stopping criteria that can be applied during generation."),wv=c(),Fa=o("div"),m(Vo.$$.fragment),pp=c(),nt=o("div"),m(Mo.$$.fragment),Lv=c(),Da=o("div"),m(Go.$$.fragment),fp=c(),Fe=o("div"),m(jo.$$.fragment),xv=c(),Ho=o("p"),Ev=a("This class can be used to stop generation whenever the full generated number of tokens exceeds "),hl=o("code"),Pv=a("max_length"),Fv=a(`. Keep in mind for decoder-only type of transformers, this will include the initial prompted tokens.`),Dv=c(),za=o("div"),m(Ro.$$.fragment),mp=c(),De=o("div"),m(Ko.$$.fragment),zv=c(),Uo=o("p"),Sv=a(`This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the time will start being counted when you initialize this function. 
You can override this by passing an `),gl=o("code"),Ov=a("initial_time"),qv=a("."),Bv=c(),Sa=o("div"),m(Yo.$$.fragment),hp=c(),st=o("h2"),It=o("a"),ul=o("span"),m(Xo.$$.fragment),Iv=c(),_l=o("span"),Av=a("Constraints"),gp=c(),At=o("p"),Cv=a("A "),Oa=o("a"),Nv=a("Constraint"),Wv=a(" can be used to force the generation to include specific tokens or sequences in the output."),up=c(),$=o("div"),m(Jo.$$.fragment),Vv=c(),vl=o("p"),Mv=a(`Abstract base class for all constraints that can be applied during generation. It must define how the constraint can be satisfied.`),Gv=c(),bl=o("p"),jv=a("All classes that inherit Constraint must follow the requirement that"),Hv=c(),m(Qo.$$.fragment),Rv=c(),$l=o("p"),Kv=a("will always terminate (halt)."),Uv=c(),Ct=o("div"),m(Zo.$$.fragment),Yv=c(),Tl=o("p"),Xv=a("When called, returns the token that would take this constraint one step closer to being fulfilled."),Jv=c(),Nt=o("div"),m(en.$$.fragment),Qv=c(),yl=o("p"),Zv=a("Creates a new instance of this constraint."),eb=c(),Wt=o("div"),m(tn.$$.fragment),tb=c(),kl=o("p"),rb=a("Reads in a token and returns whether it creates progress."),ob=c(),Vt=o("div"),m(rn.$$.fragment),nb=c(),on=o("p"),sb=a("Returns the number of remaining steps of "),wl=o("code"),ab=a("advance()"),ib=a(" in order to complete this constraint."),cb=c(),Mt=o("div"),m(nn.$$.fragment),lb=c(),Ll=o("p"),db=a(`Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of a constraint is abrupted by an unwanted token.`),pb=c(),Gt=o("div"),m(sn.$$.fragment),fb=c(),xl=o("p"),mb=a("Tests whether this constraint has been properly defined."),hb=c(),Ce=o("div"),m(an.$$.fragment),gb=c(),cn=o("p"),ub=a(`Reads in a token and returns booleans that indicate the progress made by it. 
This function will update the state of this object unlikes `),El=o("code"),_b=a("does_advance(self, token_id: int)"),vb=a("."),bb=c(),Pl=o("p"),$b=a(`This isn\u2019t to test whether a certain token will advance the progress; it\u2019s to update its state as if it has been generated. This becomes important if token_id != desired token (refer to else statement in PhrasalConstraint)`),_p=c(),at=o("div"),m(ln.$$.fragment),Tb=c(),qa=o("p"),Ba=o("a"),yb=a("Constraint"),kb=a(" enforcing that an ordered sequence of tokens is included in the output."),vp=c(),it=o("div"),m(dn.$$.fragment),wb=c(),pn=o("p"),Lb=a("A special "),Ia=o("a"),xb=a("Constraint"),Eb=a(" that is fulfilled by fulfilling just one of several constraints."),bp=c(),I=o("div"),m(fn.$$.fragment),Pb=c(),Fl=o("p"),Fb=a("A class for beam scorers to track its progress through a list of constraints."),Db=c(),E=o("div"),m(mn.$$.fragment),zb=c(),Dl=o("p"),Sb=a(`The list of tokens to generate such that we can make progress. By \u201Clist\u201D we don\u2019t mean the list of token that will fully fulfill a constraint.`),Ob=c(),ct=o("p"),qb=a("Given constraints "),zl=o("code"),Bb=a("c_i = {t_ij | j == # of tokens}"),Ib=a(`, If we\u2019re not in the middle of progressing through a specific constraint `),Sl=o("code"),Ab=a("c_i"),Cb=a(", we return:"),Nb=c(),Ol=o("p"),ql=o("code"),Wb=a("[t_k1 for k in indices of unfulfilled constraints]"),Vb=c(),ze=o("p"),Mb=a(`If we are in the middle of a constraint, then we return: `),Bl=o("code"),Gb=a("[t_ij]"),jb=a(", where "),Il=o("code"),Hb=a("i"),Rb=a(" is the index of the inprogress constraint, "),Al=o("code"),Kb=a("j"),Ub=a(" is the next step for the constraint."),Yb=c(),Cl=o("p"),Xb=a(`Though we don\u2019t care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint, that\u2019s the only one we\u2019ll return.`),Jb=c(),jt=o("div"),m(hn.$$.fragment),Qb=c(),Nl=o("p"),Zb=a("token_ids: the tokens generated thus far to reset the state of the progress 
through constraints."),$p=c(),lt=o("h2"),Ht=o("a"),Wl=o("span"),m(gn.$$.fragment),e1=c(),Vl=o("span"),t1=a("BeamSearch"),Tp=c(),A=o("div"),m(un.$$.fragment),r1=c(),dt=o("p"),o1=a("Abstract base class for all beam scorers that are used for "),Aa=o("a"),n1=a("beam_search()"),s1=a(` and `),Ca=o("a"),a1=a("beam_sample()"),i1=a("."),c1=c(),Na=o("div"),m(_n.$$.fragment),l1=c(),Wa=o("div"),m(vn.$$.fragment),yp=c(),x=o("div"),m(bn.$$.fragment),d1=c(),Va=o("p"),Ma=o("a"),p1=a("BeamScorer"),f1=a(" implementing standard beam search decoding."),m1=c(),$n=o("p"),h1=a("Adapted in part from "),Tn=o("a"),g1=a(`Facebook\u2019s XLM beam search code`),u1=a("."),_1=c(),Ga=o("p"),v1=a("Reference for the diverse beam search algorithm and implementation "),yn=o("a"),b1=a(`Ashwin Kalyan\u2019s DBS implementation`),$1=c(),ja=o("div"),m(kn.$$.fragment),T1=c(),Ha=o("div"),m(wn.$$.fragment),kp=c(),C=o("div"),m(Ln.$$.fragment),y1=c(),Ra=o("p"),Ka=o("a"),k1=a("BeamScorer"),w1=a(" implementing constrained beam search decoding."),L1=c(),Ua=o("div"),m(xn.$$.fragment),x1=c(),Ya=o("div"),m(En.$$.fragment),wp=c(),pt=o("h2"),Rt=o("a"),Ml=o("span"),m(Pn.$$.fragment),E1=c(),Gl=o("span"),P1=a("Utilities"),Lp=c(),Se=o("div"),m(Fn.$$.fragment),F1=c(),jl=o("p"),D1=a("Filter a distribution of logits using top-k and/or nucleus (top-p) filtering"),z1=c(),Xa=o("p"),S1=a("From: "),Dn=o("a"),O1=a("https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),xp=c(),Oe=o("div"),m(zn.$$.fragment),q1=c(),Hl=o("p"),B1=a("Filter a distribution of logits using top-k and/or nucleus (top-p) filtering"),I1=c(),Ja=o("p"),A1=a("From: "),Sn=o("a"),C1=a("https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),this.h()},l(e){const p=P2('[data-svelte="svelte-1phssyn"]',document.head);Be=n(p,"META",{name:!0,content:!0}),p.forEach(r),qn=l(e),N=n(e,"H1",{class:!0});var Pp=s(N);H=n(Pp,"A",{id:!0,class:!0,href:!0});var h$=s(H);Gi=n(h$,"SPAN",{});var 
g$=s(Gi);h(Zt.$$.fragment,g$),g$.forEach(r),h$.forEach(r),kf=l(Pp),ji=n(Pp,"SPAN",{});var u$=s(ji);wf=i(u$,"Utilities for Generation"),u$.forEach(r),Pp.forEach(r),Jl=l(e),y=n(e,"P",{});var P=s(y);Lf=i(P,"This page lists all the utility functions used by "),Bn=n(P,"A",{href:!0});var _$=s(Bn);xf=i(_$,"generate()"),_$.forEach(r),Ef=i(P,`, `),In=n(P,"A",{href:!0});var v$=s(In);Pf=i(v$,"greedy_search()"),v$.forEach(r),Ff=i(P,`, `),An=n(P,"A",{href:!0});var b$=s(An);Df=i(b$,"sample()"),b$.forEach(r),zf=i(P,`, `),Cn=n(P,"A",{href:!0});var $$=s(Cn);Sf=i($$,"beam_search()"),$$.forEach(r),Of=i(P,`, `),Nn=n(P,"A",{href:!0});var T$=s(Nn);qf=i(T$,"beam_sample()"),T$.forEach(r),Bf=i(P,`, `),Wn=n(P,"A",{href:!0});var y$=s(Wn);If=i(y$,"group_beam_search()"),y$.forEach(r),Af=i(P,`, and `),Vn=n(P,"A",{href:!0});var k$=s(Vn);Cf=i(k$,"constrained_beam_search()"),k$.forEach(r),Nf=i(P,"."),P.forEach(r),Ql=l(e),Mn=n(e,"P",{});var w$=s(Mn);Wf=i(w$,"Most of those are only useful if you are studying the code of the generate methods in the library."),w$.forEach(r),Zl=l(e),Ge=n(e,"H2",{class:!0});var Fp=s(Ge);mt=n(Fp,"A",{id:!0,class:!0,href:!0});var L$=s(mt);Hi=n(L$,"SPAN",{});var x$=s(Hi);h(er.$$.fragment,x$),x$.forEach(r),L$.forEach(r),Vf=l(Fp),Ri=n(Fp,"SPAN",{});var E$=s(Ri);Mf=i(E$,"Generate Outputs"),E$.forEach(r),Fp.forEach(r),ed=l(e),W=n(e,"P",{});var Kt=s(W);Gf=i(Kt,"The output of "),Gn=n(Kt,"A",{href:!0});var P$=s(Gn);jf=i(P$,"generate()"),P$.forEach(r),Hf=i(Kt,` is an instance of a subclass of `),jn=n(Kt,"A",{href:!0});var F$=s(jn);Rf=i(F$,"ModelOutput"),F$.forEach(r),Kf=i(Kt,`. 
This output is a data structure containing all the information returned by `),Hn=n(Kt,"A",{href:!0});var D$=s(Hn);Uf=i(D$,"generate()"),D$.forEach(r),Yf=i(Kt,", but that can also be used as tuple or dictionary."),Kt.forEach(r),td=l(e),Rn=n(e,"P",{});var z$=s(Rn);Xf=i(z$,"Here\u2019s an example:"),z$.forEach(r),rd=l(e),h(tr.$$.fragment,e),od=l(e),Ie=n(e,"P",{});var Qa=s(Ie);Jf=i(Qa,"The "),Ki=n(Qa,"CODE",{});var S$=s(Ki);Qf=i(S$,"generation_output"),S$.forEach(r),Zf=i(Qa," object is a "),Kn=n(Qa,"A",{href:!0});var O$=s(Kn);em=i(O$,"GreedySearchDecoderOnlyOutput"),O$.forEach(r),tm=i(Qa,`, as we can see in the documentation of that class below, it means it has the following attributes:`),Qa.forEach(r),nd=l(e),V=n(e,"UL",{});var Ut=s(V);Un=n(Ut,"LI",{});var N1=s(Un);Ui=n(N1,"CODE",{});var q$=s(Ui);rm=i(q$,"sequences"),q$.forEach(r),om=i(N1,": the generated sequences of tokens"),N1.forEach(r),nm=l(Ut),Yn=n(Ut,"LI",{});var W1=s(Yn);Yi=n(W1,"CODE",{});var B$=s(Yi);sm=i(B$,"scores"),B$.forEach(r),am=i(W1," (optional): the prediction scores of the language modelling head, for each generation step"),W1.forEach(r),im=l(Ut),Xn=n(Ut,"LI",{});var V1=s(Xn);Xi=n(V1,"CODE",{});var I$=s(Xi);cm=i(I$,"hidden_states"),I$.forEach(r),lm=i(V1," (optional): the hidden states of the model, for each generation step"),V1.forEach(r),dm=l(Ut),Jn=n(Ut,"LI",{});var M1=s(Jn);Ji=n(M1,"CODE",{});var A$=s(Ji);pm=i(A$,"attentions"),A$.forEach(r),fm=i(M1," (optional): the attention weights of the model, for each generation step"),M1.forEach(r),Ut.forEach(r),sd=l(e),k=n(e,"P",{});var S=s(k);mm=i(S,"Here we have the "),Qi=n(S,"CODE",{});var C$=s(Qi);hm=i(C$,"scores"),C$.forEach(r),gm=i(S," since we passed along "),Zi=n(S,"CODE",{});var N$=s(Zi);um=i(N$,"output_scores=True"),N$.forEach(r),_m=i(S,", but we don\u2019t have "),ec=n(S,"CODE",{});var W$=s(ec);vm=i(W$,"hidden_states"),W$.forEach(r),bm=i(S,` and `),tc=n(S,"CODE",{});var V$=s(tc);$m=i(V$,"attentions"),V$.forEach(r),Tm=i(S," because we didn\u2019t 
pass "),rc=n(S,"CODE",{});var M$=s(rc);ym=i(M$,"output_hidden_states=True"),M$.forEach(r),km=i(S," or "),oc=n(S,"CODE",{});var G$=s(oc);wm=i(G$,"output_attentions=True"),G$.forEach(r),Lm=i(S,"."),S.forEach(r),ad=l(e),F=n(e,"P",{});var Ne=s(F);xm=i(Ne,`You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get `),nc=n(Ne,"CODE",{});var j$=s(nc);Em=i(j$,"None"),j$.forEach(r),Pm=i(Ne,". Here for instance "),sc=n(Ne,"CODE",{});var H$=s(sc);Fm=i(H$,"generation_output.scores"),H$.forEach(r),Dm=i(Ne,` are all the generated prediction scores of the language modeling head, and `),ac=n(Ne,"CODE",{});var R$=s(ac);zm=i(R$,"generation_output.attentions"),R$.forEach(r),Sm=i(Ne," is "),ic=n(Ne,"CODE",{});var K$=s(ic);Om=i(K$,"None"),K$.forEach(r),qm=i(Ne,"."),Ne.forEach(r),id=l(e),D=n(e,"P",{});var We=s(D);Bm=i(We,"When using our "),cc=n(We,"CODE",{});var U$=s(cc);Im=i(U$,"generation_output"),U$.forEach(r),Am=i(We," object as a tuple, it only keeps the attributes that don\u2019t have "),lc=n(We,"CODE",{});var Y$=s(lc);Cm=i(Y$,"None"),Y$.forEach(r),Nm=i(We,` values. Here, for instance, it has two elements, `),dc=n(We,"CODE",{});var X$=s(dc);Wm=i(X$,"loss"),X$.forEach(r),Vm=i(We," then "),pc=n(We,"CODE",{});var J$=s(pc);Mm=i(J$,"logits"),J$.forEach(r),Gm=i(We,", so"),We.forEach(r),cd=l(e),h(rr.$$.fragment,e),ld=l(e),ht=n(e,"P",{});var Dp=s(ht);jm=i(Dp,"will return the tuple "),fc=n(Dp,"CODE",{});var Q$=s(fc);Hm=i(Q$,"(generation_output.sequences, generation_output.scores)"),Q$.forEach(r),Rm=i(Dp," for instance."),Dp.forEach(r),dd=l(e),z=n(e,"P",{});var Ve=s(z);Km=i(Ve,"When using our "),mc=n(Ve,"CODE",{});var Z$=s(mc);Um=i(Z$,"generation_output"),Z$.forEach(r),Ym=i(Ve," object as a dictionary, it only keeps the attributes that don\u2019t have "),hc=n(Ve,"CODE",{});var eT=s(hc);Xm=i(eT,"None"),eT.forEach(r),Jm=i(Ve,` values. 
Here, for instance, it has two keys that are `),gc=n(Ve,"CODE",{});var tT=s(gc);Qm=i(tT,"sequences"),tT.forEach(r),Zm=i(Ve," and "),uc=n(Ve,"CODE",{});var rT=s(uc);eh=i(rT,"scores"),rT.forEach(r),th=i(Ve,"."),Ve.forEach(r),pd=l(e),Qn=n(e,"P",{});var oT=s(Qn);rh=i(oT,"We document here all output types."),oT.forEach(r),fd=l(e),je=n(e,"H3",{class:!0});var zp=s(je);gt=n(zp,"A",{id:!0,class:!0,href:!0});var nT=s(gt);_c=n(nT,"SPAN",{});var sT=s(_c);h(or.$$.fragment,sT),sT.forEach(r),nT.forEach(r),oh=l(zp),vc=n(zp,"SPAN",{});var aT=s(vc);nh=i(aT,"GreedySearchOutput"),aT.forEach(r),zp.forEach(r),md=l(e),He=n(e,"DIV",{class:!0});var Sp=s(He);h(nr.$$.fragment,Sp),sh=l(Sp),bc=n(Sp,"P",{});var iT=s(bc);ah=i(iT,"Base class for outputs of decoder-only generation models using greedy search."),iT.forEach(r),Sp.forEach(r),hd=l(e),Re=n(e,"DIV",{class:!0});var Op=s(Re);h(sr.$$.fragment,Op),ih=l(Op),$c=n(Op,"P",{});var cT=s($c);ch=i(cT,`Base class for outputs of encoder-decoder generation models using greedy search. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),cT.forEach(r),Op.forEach(r),gd=l(e),R=n(e,"DIV",{class:!0});var Za=s(R);h(ar.$$.fragment,Za),lh=l(Za),Tc=n(Za,"P",{});var lT=s(Tc);dh=i(lT,"Flax Base class for outputs of decoder-only generation models using greedy search."),lT.forEach(r),ph=l(Za),ut=n(Za,"DIV",{class:!0});var qp=s(ut);h(ir.$$.fragment,qp),fh=l(qp),yc=n(qp,"P",{});var dT=s(yc);mh=i(dT,"\u201CReturns a new object replacing the specified fields with new values."),dT.forEach(r),qp.forEach(r),Za.forEach(r),ud=l(e),Ke=n(e,"H3",{class:!0});var Bp=s(Ke);_t=n(Bp,"A",{id:!0,class:!0,href:!0});var pT=s(_t);kc=n(pT,"SPAN",{});var fT=s(kc);h(cr.$$.fragment,fT),fT.forEach(r),pT.forEach(r),hh=l(Bp),wc=n(Bp,"SPAN",{});var mT=s(wc);gh=i(mT,"SampleOutput"),mT.forEach(r),Bp.forEach(r),_d=l(e),Ue=n(e,"DIV",{class:!0});var Ip=s(Ue);h(lr.$$.fragment,Ip),uh=l(Ip),Lc=n(Ip,"P",{});var hT=s(Lc);_h=i(hT,"Base class for outputs of decoder-only generation models using sampling."),hT.forEach(r),Ip.forEach(r),vd=l(e),Ye=n(e,"DIV",{class:!0});var Ap=s(Ye);h(dr.$$.fragment,Ap),vh=l(Ap),xc=n(Ap,"P",{});var gT=s(xc);bh=i(gT,`Base class for outputs of encoder-decoder generation models using sampling. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),gT.forEach(r),Ap.forEach(r),bd=l(e),K=n(e,"DIV",{class:!0});var ei=s(K);h(pr.$$.fragment,ei),$h=l(ei),Ec=n(ei,"P",{});var uT=s(Ec);Th=i(uT,"Flax Base class for outputs of decoder-only generation models using sampling."),uT.forEach(r),yh=l(ei),vt=n(ei,"DIV",{class:!0});var Cp=s(vt);h(fr.$$.fragment,Cp),kh=l(Cp),Pc=n(Cp,"P",{});var _T=s(Pc);wh=i(_T,"\u201CReturns a new object replacing the specified fields with new values."),_T.forEach(r),Cp.forEach(r),ei.forEach(r),$d=l(e),Xe=n(e,"H3",{class:!0});var Np=s(Xe);bt=n(Np,"A",{id:!0,class:!0,href:!0});var vT=s(bt);Fc=n(vT,"SPAN",{});var bT=s(Fc);h(mr.$$.fragment,bT),bT.forEach(r),vT.forEach(r),Lh=l(Np),Dc=n(Np,"SPAN",{});var $T=s(Dc);xh=i($T,"BeamSearchOutput"),$T.forEach(r),Np.forEach(r),Td=l(e),Je=n(e,"DIV",{class:!0});var Wp=s(Je);h(hr.$$.fragment,Wp),Eh=l(Wp),zc=n(Wp,"P",{});var TT=s(zc);Ph=i(TT,"Base class for outputs of decoder-only generation models using beam search."),TT.forEach(r),Wp.forEach(r),yd=l(e),Qe=n(e,"DIV",{class:!0});var Vp=s(Qe);h(gr.$$.fragment,Vp),Fh=l(Vp),Sc=n(Vp,"P",{});var yT=s(Sc);Dh=i(yT,`Base class for outputs of encoder-decoder generation models using beam search. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),yT.forEach(r),Vp.forEach(r),kd=l(e),Ze=n(e,"H3",{class:!0});var Mp=s(Ze);$t=n(Mp,"A",{id:!0,class:!0,href:!0});var kT=s($t);Oc=n(kT,"SPAN",{});var wT=s(Oc);h(ur.$$.fragment,wT),wT.forEach(r),kT.forEach(r),zh=l(Mp),qc=n(Mp,"SPAN",{});var LT=s(qc);Sh=i(LT,"BeamSampleOutput"),LT.forEach(r),Mp.forEach(r),wd=l(e),et=n(e,"DIV",{class:!0});var Gp=s(et);h(_r.$$.fragment,Gp),Oh=l(Gp),Bc=n(Gp,"P",{});var xT=s(Bc);qh=i(xT,"Base class for outputs of decoder-only generation models using beam sample."),xT.forEach(r),Gp.forEach(r),Ld=l(e),tt=n(e,"DIV",{class:!0});var jp=s(tt);h(vr.$$.fragment,jp),Bh=l(jp),Ic=n(jp,"P",{});var ET=s(Ic);Ih=i(ET,`Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)`),ET.forEach(r),jp.forEach(r),xd=l(e),rt=n(e,"H2",{class:!0});var Hp=s(rt);Tt=n(Hp,"A",{id:!0,class:!0,href:!0});var PT=s(Tt);Ac=n(PT,"SPAN",{});var FT=s(Ac);h(br.$$.fragment,FT),FT.forEach(r),PT.forEach(r),Ah=l(Hp),Cc=n(Hp,"SPAN",{});var DT=s(Cc);Ch=i(DT,"LogitsProcessor"),DT.forEach(r),Hp.forEach(r),Ed=l(e),yt=n(e,"P",{});var Rp=s(yt);Nh=i(Rp,"A "),Zn=n(Rp,"A",{href:!0});var zT=s(Zn);Wh=i(zT,"LogitsProcessor"),zT.forEach(r),Vh=i(Rp,` can be used to modify the prediction scores of a language model head for generation.`),Rp.forEach(r),Pd=l(e),U=n(e,"DIV",{class:!0});var ti=s(U);h($r.$$.fragment,ti),Mh=l(ti),Nc=n(ti,"P",{});var ST=s(Nc);Gh=i(ST,"Abstract base class for all logit processors that can be applied during generation."),ST.forEach(r),jh=l(ti),kt=n(ti,"DIV",{class:!0});var 
Kp=s(kt);h(Tr.$$.fragment,Kp),Hh=l(Kp),Wc=n(Kp,"P",{});var OT=s(Wc);Rh=i(OT,"Torch method for processing logits."),OT.forEach(r),Kp.forEach(r),ti.forEach(r),Fd=l(e),Y=n(e,"DIV",{class:!0});var ri=s(Y);h(yr.$$.fragment,ri),Kh=l(ri),w=n(ri,"P",{});var O=s(w);Uh=i(O,"This class can be used to create a list of "),es=n(O,"A",{href:!0});var qT=s(es);Yh=i(qT,"LogitsProcessor"),qT.forEach(r),Xh=i(O," or "),ts=n(O,"A",{href:!0});var BT=s(ts);Jh=i(BT,"LogitsWarper"),BT.forEach(r),Qh=i(O,` to subsequently process a `),Vc=n(O,"CODE",{});var IT=s(Vc);Zh=i(IT,"scores"),IT.forEach(r),eg=i(O," input tensor. This class inherits from list and adds a specific "),Mc=n(O,"EM",{});var AT=s(Mc);Gc=n(AT,"STRONG",{});var CT=s(Gc);tg=i(CT,"call"),CT.forEach(r),AT.forEach(r),rg=i(O,` method to apply each `),rs=n(O,"A",{href:!0});var NT=s(rs);og=i(NT,"LogitsProcessor"),NT.forEach(r),ng=i(O," or "),os=n(O,"A",{href:!0});var WT=s(os);sg=i(WT,"LogitsWarper"),WT.forEach(r),ag=i(O," to the inputs."),O.forEach(r),ig=l(ri),ns=n(ri,"DIV",{class:!0});var VT=s(ns);h(kr.$$.fragment,VT),VT.forEach(r),ri.forEach(r),Dd=l(e),X=n(e,"DIV",{class:!0});var oi=s(X);h(wr.$$.fragment,oi),cg=l(oi),jc=n(oi,"P",{});var MT=s(jc);lg=i(MT,"Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),MT.forEach(r),dg=l(oi),wt=n(oi,"DIV",{class:!0});var Up=s(wt);h(Lr.$$.fragment,Up),pg=l(Up),Hc=n(Up,"P",{});var GT=s(Hc);fg=i(GT,"Torch method for warping logits."),GT.forEach(r),Up.forEach(r),oi.forEach(r),zd=l(e),J=n(e,"DIV",{class:!0});var ni=s(J);h(xr.$$.fragment,ni),mg=l(ni),ss=n(ni,"P",{});var G1=s(ss);as=n(G1,"A",{href:!0});var jT=s(as);hg=i(jT,"LogitsProcessor"),jT.forEach(r),gg=i(G1," enforcing a min-length by setting EOS probability to 0."),G1.forEach(r),ug=l(ni),is=n(ni,"DIV",{class:!0});var HT=s(is);h(Er.$$.fragment,HT),HT.forEach(r),ni.forEach(r),Sd=l(e),Q=n(e,"DIV",{class:!0});var si=s(Q);h(Pr.$$.fragment,si),_g=l(si),cs=n(si,"P",{});var 
j1=s(cs);ls=n(j1,"A",{href:!0});var RT=s(ls);vg=i(RT,"LogitsWarper"),RT.forEach(r),bg=i(j1," for temperature (exponential scaling output probability distribution)."),j1.forEach(r),$g=l(si),ds=n(si,"DIV",{class:!0});var KT=s(ds);h(Fr.$$.fragment,KT),KT.forEach(r),si.forEach(r),Od=l(e),Z=n(e,"DIV",{class:!0});var ai=s(Z);h(Dr.$$.fragment,ai),Tg=l(ai),ps=n(ai,"P",{});var H1=s(ps);fs=n(H1,"A",{href:!0});var UT=s(fs);yg=i(UT,"LogitsProcessor"),UT.forEach(r),kg=i(H1," enforcing an exponential penalty on repeated sequences."),H1.forEach(r),wg=l(ai),ms=n(ai,"DIV",{class:!0});var YT=s(ms);h(zr.$$.fragment,YT),YT.forEach(r),ai.forEach(r),qd=l(e),ee=n(e,"DIV",{class:!0});var ii=s(ee);h(Sr.$$.fragment,ii),Lg=l(ii),hs=n(ii,"P",{});var R1=s(hs);gs=n(R1,"A",{href:!0});var XT=s(gs);xg=i(XT,"LogitsWarper"),XT.forEach(r),Eg=i(R1," that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off."),R1.forEach(r),Pg=l(ii),us=n(ii,"DIV",{class:!0});var JT=s(us);h(Or.$$.fragment,JT),JT.forEach(r),ii.forEach(r),Bd=l(e),te=n(e,"DIV",{class:!0});var ci=s(te);h(qr.$$.fragment,ci),Fg=l(ci),_s=n(ci,"P",{});var K1=s(_s);vs=n(K1,"A",{href:!0});var QT=s(vs);Dg=i(QT,"LogitsWarper"),QT.forEach(r),zg=i(K1," that performs top-k, i.e. restricting to the k highest probability elements."),K1.forEach(r),Sg=l(ci),bs=n(ci,"DIV",{class:!0});var ZT=s(bs);h(Br.$$.fragment,ZT),ZT.forEach(r),ci.forEach(r),Id=l(e),re=n(e,"DIV",{class:!0});var li=s(re);h(Ir.$$.fragment,li),Og=l(li),Lt=n(li,"P",{});var Rl=s(Lt);$s=n(Rl,"A",{href:!0});var e4=s($s);qg=i(e4,"LogitsProcessor"),e4.forEach(r),Bg=i(Rl,` that enforces no repetition of n-grams. 
See `),Ar=n(Rl,"A",{href:!0,rel:!0});var t4=s(Ar);Ig=i(t4,"Fairseq"),t4.forEach(r),Ag=i(Rl,"."),Rl.forEach(r),Cg=l(li),Ts=n(li,"DIV",{class:!0});var r4=s(Ts);h(Cr.$$.fragment,r4),r4.forEach(r),li.forEach(r),Ad=l(e),oe=n(e,"DIV",{class:!0});var di=s(oe);h(Nr.$$.fragment,di),Ng=l(di),ys=n(di,"P",{});var U1=s(ys);ks=n(U1,"A",{href:!0});var o4=s(ks);Wg=i(o4,"LogitsProcessor"),o4.forEach(r),Vg=i(U1," that enforces that specified sequences will never be sampled."),U1.forEach(r),Mg=l(di),ws=n(di,"DIV",{class:!0});var n4=s(ws);h(Wr.$$.fragment,n4),n4.forEach(r),di.forEach(r),Cd=l(e),ne=n(e,"DIV",{class:!0});var pi=s(ne);h(Vr.$$.fragment,pi),Gg=l(pi),xt=n(pi,"P",{});var Kl=s(xt);Ls=n(Kl,"A",{href:!0});var s4=s(Ls);jg=i(s4,"LogitsProcessor"),s4.forEach(r),Hg=i(Kl,` that enforces constrained generation and is useful for prefix-conditioned constrained generation. See `),Mr=n(Kl,"A",{href:!0,rel:!0});var a4=s(Mr);Rg=i(a4,"Autoregressive Entity Retrieval"),a4.forEach(r),Kg=i(Kl," for more information."),Kl.forEach(r),Ug=l(pi),xs=n(pi,"DIV",{class:!0});var i4=s(xs);h(Gr.$$.fragment,i4),i4.forEach(r),pi.forEach(r),Nd=l(e),se=n(e,"DIV",{class:!0});var fi=s(se);h(jr.$$.fragment,fi),Yg=l(fi),Ae=n(fi,"P",{});var On=s(Ae);Es=n(On,"A",{href:!0});var c4=s(Es);Xg=i(c4,"LogitsProcessor"),c4.forEach(r),Jg=i(On,` that enforces diverse beam search. Note that this logits processor is only effective for `),Ps=n(On,"A",{href:!0});var l4=s(Ps);Qg=i(l4,"PreTrainedModel.group_beam_search()"),l4.forEach(r),Zg=i(On,". 
See "),Hr=n(On,"A",{href:!0,rel:!0});var d4=s(Hr);eu=i(d4,`Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models`),d4.forEach(r),tu=i(On," for more details."),On.forEach(r),ru=l(fi),Fs=n(fi,"DIV",{class:!0});var p4=s(Fs);h(Rr.$$.fragment,p4),p4.forEach(r),fi.forEach(r),Wd=l(e),ae=n(e,"DIV",{class:!0});var mi=s(ae);h(Kr.$$.fragment,mi),ou=l(mi),Ds=n(mi,"P",{});var Y1=s(Ds);zs=n(Y1,"A",{href:!0});var f4=s(zs);nu=i(f4,"LogitsProcessor"),f4.forEach(r),su=i(Y1," that enforces the specified token as the first generated token."),Y1.forEach(r),au=l(mi),Ss=n(mi,"DIV",{class:!0});var m4=s(Ss);h(Ur.$$.fragment,m4),m4.forEach(r),mi.forEach(r),Vd=l(e),ie=n(e,"DIV",{class:!0});var hi=s(ie);h(Yr.$$.fragment,hi),iu=l(hi),Et=n(hi,"P",{});var Ul=s(Et);Os=n(Ul,"A",{href:!0});var h4=s(Os);cu=i(h4,"LogitsProcessor"),h4.forEach(r),lu=i(Ul," that enforces the specified token as the last generated token when "),Rc=n(Ul,"CODE",{});var g4=s(Rc);du=i(g4,"max_length"),g4.forEach(r),pu=i(Ul," is reached."),Ul.forEach(r),fu=l(hi),qs=n(hi,"DIV",{class:!0});var u4=s(qs);h(Xr.$$.fragment,u4),u4.forEach(r),hi.forEach(r),Md=l(e),ce=n(e,"DIV",{class:!0});var gi=s(ce);h(Jr.$$.fragment,gi),mu=l(gi),M=n(gi,"P",{});var ft=s(M);Bs=n(ft,"A",{href:!0});var _4=s(Bs);hu=i(_4,"LogitsProcessor"),_4.forEach(r),gu=i(ft," that removes all "),Kc=n(ft,"CODE",{});var v4=s(Kc);uu=i(v4,"nan"),v4.forEach(r),_u=i(ft," and "),Uc=n(ft,"CODE",{});var b4=s(Uc);vu=i(b4,"inf"),b4.forEach(r),bu=i(ft,` values to avoid the generation method to fail. Note that using the logits processor should only be used if necessary since it can slow down the generation method. 
`),Yc=n(ft,"CODE",{});var $4=s(Yc);$u=i($4,"max_length"),$4.forEach(r),Tu=i(ft,` is reached.`),ft.forEach(r),yu=l(gi),Is=n(gi,"DIV",{class:!0});var T4=s(Is);h(Qr.$$.fragment,T4),T4.forEach(r),gi.forEach(r),Gd=l(e),le=n(e,"DIV",{class:!0});var ui=s(le);h(Zr.$$.fragment,ui),ku=l(ui),Xc=n(ui,"P",{});var y4=s(Xc);wu=i(y4,"Abstract base class for all logit processors that can be applied during generation."),y4.forEach(r),Lu=l(ui),Pt=n(ui,"DIV",{class:!0});var Yp=s(Pt);h(eo.$$.fragment,Yp),xu=l(Yp),Jc=n(Yp,"P",{});var k4=s(Jc);Eu=i(k4,"TF method for processing logits."),k4.forEach(r),Yp.forEach(r),ui.forEach(r),jd=l(e),de=n(e,"DIV",{class:!0});var _i=s(de);h(to.$$.fragment,_i),Pu=l(_i),B=n(_i,"P",{});var Me=s(B);Fu=i(Me,"This class can be used to create a list of "),As=n(Me,"A",{href:!0});var w4=s(As);Du=i(w4,"TFLogitsProcessor"),w4.forEach(r),zu=i(Me," to subsequently process a "),Qc=n(Me,"CODE",{});var L4=s(Qc);Su=i(L4,"scores"),L4.forEach(r),Ou=i(Me,` input tensor. This class inherits from list and adds a specific `),Zc=n(Me,"EM",{});var x4=s(Zc);el=n(x4,"STRONG",{});var E4=s(el);qu=i(E4,"call"),E4.forEach(r),x4.forEach(r),Bu=i(Me," method to apply each "),Cs=n(Me,"A",{href:!0});var P4=s(Cs);Iu=i(P4,"TFLogitsProcessor"),P4.forEach(r),Au=i(Me,` to the inputs.`),Me.forEach(r),Cu=l(_i),Ns=n(_i,"DIV",{class:!0});var F4=s(Ns);h(ro.$$.fragment,F4),F4.forEach(r),_i.forEach(r),Hd=l(e),pe=n(e,"DIV",{class:!0});var vi=s(pe);h(oo.$$.fragment,vi),Nu=l(vi),tl=n(vi,"P",{});var D4=s(tl);Wu=i(D4,"Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),D4.forEach(r),Vu=l(vi),Ft=n(vi,"DIV",{class:!0});var Xp=s(Ft);h(no.$$.fragment,Xp),Mu=l(Xp),rl=n(Xp,"P",{});var z4=s(rl);Gu=i(z4,"TF method for warping logits."),z4.forEach(r),Xp.forEach(r),vi.forEach(r),Rd=l(e),fe=n(e,"DIV",{class:!0});var bi=s(fe);h(so.$$.fragment,bi),ju=l(bi),Ws=n(bi,"P",{});var X1=s(Ws);Vs=n(X1,"A",{href:!0});var 
S4=s(Vs);Hu=i(S4,"TFLogitsWarper"),S4.forEach(r),Ru=i(X1," for temperature (exponential scaling output probability distribution)."),X1.forEach(r),Ku=l(bi),Ms=n(bi,"DIV",{class:!0});var O4=s(Ms);h(ao.$$.fragment,O4),O4.forEach(r),bi.forEach(r),Kd=l(e),me=n(e,"DIV",{class:!0});var $i=s(me);h(io.$$.fragment,$i),Uu=l($i),Gs=n($i,"P",{});var J1=s(Gs);js=n(J1,"A",{href:!0});var q4=s(js);Yu=i(q4,"TFLogitsWarper"),q4.forEach(r),Xu=i(J1," that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off."),J1.forEach(r),Ju=l($i),Hs=n($i,"DIV",{class:!0});var B4=s(Hs);h(co.$$.fragment,B4),B4.forEach(r),$i.forEach(r),Ud=l(e),he=n(e,"DIV",{class:!0});var Ti=s(he);h(lo.$$.fragment,Ti),Qu=l(Ti),Rs=n(Ti,"P",{});var Q1=s(Rs);Ks=n(Q1,"A",{href:!0});var I4=s(Ks);Zu=i(I4,"TFLogitsWarper"),I4.forEach(r),e_=i(Q1," that performs top-k, i.e. restricting to the k highest probability elements."),Q1.forEach(r),t_=l(Ti),Us=n(Ti,"DIV",{class:!0});var A4=s(Us);h(po.$$.fragment,A4),A4.forEach(r),Ti.forEach(r),Yd=l(e),ge=n(e,"DIV",{class:!0});var yi=s(ge);h(fo.$$.fragment,yi),r_=l(yi),Ys=n(yi,"P",{});var Z1=s(Ys);Xs=n(Z1,"A",{href:!0});var C4=s(Xs);o_=i(C4,"TFLogitsProcessor"),C4.forEach(r),n_=i(Z1," enforcing a min-length by setting EOS probability to 0."),Z1.forEach(r),s_=l(yi),Js=n(yi,"DIV",{class:!0});var N4=s(Js);h(mo.$$.fragment,N4),N4.forEach(r),yi.forEach(r),Xd=l(e),ue=n(e,"DIV",{class:!0});var ki=s(ue);h(ho.$$.fragment,ki),a_=l(ki),Qs=n(ki,"P",{});var e$=s(Qs);Zs=n(e$,"A",{href:!0});var W4=s(Zs);i_=i(W4,"TFLogitsProcessor"),W4.forEach(r),c_=i(e$," that enforces that specified sequences will never be sampled."),e$.forEach(r),l_=l(ki),ea=n(ki,"DIV",{class:!0});var V4=s(ea);h(go.$$.fragment,V4),V4.forEach(r),ki.forEach(r),Jd=l(e),_e=n(e,"DIV",{class:!0});var wi=s(_e);h(uo.$$.fragment,wi),d_=l(wi),Dt=n(wi,"P",{});var Yl=s(Dt);ta=n(Yl,"A",{href:!0});var M4=s(ta);p_=i(M4,"TFLogitsProcessor"),M4.forEach(r),f_=i(Yl,` that enforces no repetition of n-grams. 
See `),_o=n(Yl,"A",{href:!0,rel:!0});var G4=s(_o);m_=i(G4,"Fairseq"),G4.forEach(r),h_=i(Yl,"."),Yl.forEach(r),g_=l(wi),ra=n(wi,"DIV",{class:!0});var j4=s(ra);h(vo.$$.fragment,j4),j4.forEach(r),wi.forEach(r),Qd=l(e),ve=n(e,"DIV",{class:!0});var Li=s(ve);h(bo.$$.fragment,Li),u_=l(Li),oa=n(Li,"P",{});var t$=s(oa);na=n(t$,"A",{href:!0});var H4=s(na);__=i(H4,"TFLogitsProcessor"),H4.forEach(r),v_=i(t$," enforcing an exponential penalty on repeated sequences."),t$.forEach(r),b_=l(Li),sa=n(Li,"DIV",{class:!0});var R4=s(sa);h($o.$$.fragment,R4),R4.forEach(r),Li.forEach(r),Zd=l(e),be=n(e,"DIV",{class:!0});var xi=s(be);h(To.$$.fragment,xi),$_=l(xi),ol=n(xi,"P",{});var K4=s(ol);T_=i(K4,"Abstract base class for all logit processors that can be applied during generation."),K4.forEach(r),y_=l(xi),zt=n(xi,"DIV",{class:!0});var Jp=s(zt);h(yo.$$.fragment,Jp),k_=l(Jp),nl=n(Jp,"P",{});var U4=s(nl);w_=i(U4,"Flax method for processing logits."),U4.forEach(r),Jp.forEach(r),xi.forEach(r),ep=l(e),$e=n(e,"DIV",{class:!0});var Ei=s($e);h(ko.$$.fragment,Ei),L_=l(Ei),L=n(Ei,"P",{});var q=s(L);x_=i(q,"This class can be used to create a list of "),aa=n(q,"A",{href:!0});var Y4=s(aa);E_=i(Y4,"FlaxLogitsProcessor"),Y4.forEach(r),P_=i(q," or "),ia=n(q,"A",{href:!0});var X4=s(ia);F_=i(X4,"FlaxLogitsWarper"),X4.forEach(r),D_=i(q,` to subsequently process a `),sl=n(q,"CODE",{});var J4=s(sl);z_=i(J4,"scores"),J4.forEach(r),S_=i(q," input tensor. 
This class inherits from list and adds a specific "),al=n(q,"EM",{});var Q4=s(al);il=n(Q4,"STRONG",{});var Z4=s(il);O_=i(Z4,"call"),Z4.forEach(r),Q4.forEach(r),q_=i(q,` method to apply each `),ca=n(q,"A",{href:!0});var ey=s(ca);B_=i(ey,"FlaxLogitsProcessor"),ey.forEach(r),I_=i(q," or "),la=n(q,"A",{href:!0});var ty=s(la);A_=i(ty,"FlaxLogitsWarper"),ty.forEach(r),C_=i(q," to the inputs."),q.forEach(r),N_=l(Ei),da=n(Ei,"DIV",{class:!0});var ry=s(da);h(wo.$$.fragment,ry),ry.forEach(r),Ei.forEach(r),tp=l(e),Te=n(e,"DIV",{class:!0});var Pi=s(Te);h(Lo.$$.fragment,Pi),W_=l(Pi),cl=n(Pi,"P",{});var oy=s(cl);V_=i(oy,"Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."),oy.forEach(r),M_=l(Pi),St=n(Pi,"DIV",{class:!0});var Qp=s(St);h(xo.$$.fragment,Qp),G_=l(Qp),ll=n(Qp,"P",{});var ny=s(ll);j_=i(ny,"Flax method for warping logits."),ny.forEach(r),Qp.forEach(r),Pi.forEach(r),rp=l(e),ye=n(e,"DIV",{class:!0});var Fi=s(ye);h(Eo.$$.fragment,Fi),H_=l(Fi),pa=n(Fi,"P",{});var r$=s(pa);fa=n(r$,"A",{href:!0});var sy=s(fa);R_=i(sy,"FlaxLogitsWarper"),sy.forEach(r),K_=i(r$," for temperature (exponential scaling output probability distribution)."),r$.forEach(r),U_=l(Fi),ma=n(Fi,"DIV",{class:!0});var ay=s(ma);h(Po.$$.fragment,ay),ay.forEach(r),Fi.forEach(r),op=l(e),ke=n(e,"DIV",{class:!0});var Di=s(ke);h(Fo.$$.fragment,Di),Y_=l(Di),ha=n(Di,"P",{});var o$=s(ha);ga=n(o$,"A",{href:!0});var iy=s(ga);X_=i(iy,"FlaxLogitsWarper"),iy.forEach(r),J_=i(o$," that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off."),o$.forEach(r),Q_=l(Di),ua=n(Di,"DIV",{class:!0});var cy=s(ua);h(Do.$$.fragment,cy),cy.forEach(r),Di.forEach(r),np=l(e),we=n(e,"DIV",{class:!0});var zi=s(we);h(zo.$$.fragment,zi),Z_=l(zi),_a=n(zi,"P",{});var n$=s(_a);va=n(n$,"A",{href:!0});var ly=s(va);ev=i(ly,"FlaxLogitsWarper"),ly.forEach(r),tv=i(n$," that performs top-k, i.e. 
restricting to the k highest probability elements."),n$.forEach(r),rv=l(zi),ba=n(zi,"DIV",{class:!0});var dy=s(ba);h(So.$$.fragment,dy),dy.forEach(r),zi.forEach(r),sp=l(e),Le=n(e,"DIV",{class:!0});var Si=s(Le);h(Oo.$$.fragment,Si),ov=l(Si),$a=n(Si,"P",{});var s$=s($a);Ta=n(s$,"A",{href:!0});var py=s(Ta);nv=i(py,"FlaxLogitsProcessor"),py.forEach(r),sv=i(s$," that enforces the specified token as the first generated token."),s$.forEach(r),av=l(Si),ya=n(Si,"DIV",{class:!0});var fy=s(ya);h(qo.$$.fragment,fy),fy.forEach(r),Si.forEach(r),ap=l(e),xe=n(e,"DIV",{class:!0});var Oi=s(xe);h(Bo.$$.fragment,Oi),iv=l(Oi),Ot=n(Oi,"P",{});var Xl=s(Ot);ka=n(Xl,"A",{href:!0});var my=s(ka);cv=i(my,"FlaxLogitsProcessor"),my.forEach(r),lv=i(Xl," that enforces the specified token as the last generated token when "),dl=n(Xl,"CODE",{});var hy=s(dl);dv=i(hy,"max_length"),hy.forEach(r),pv=i(Xl," is reached."),Xl.forEach(r),fv=l(Oi),wa=n(Oi,"DIV",{class:!0});var gy=s(wa);h(Io.$$.fragment,gy),gy.forEach(r),Oi.forEach(r),ip=l(e),Ee=n(e,"DIV",{class:!0});var qi=s(Ee);h(Ao.$$.fragment,qi),mv=l(qi),La=n(qi,"P",{});var a$=s(La);xa=n(a$,"A",{href:!0});var uy=s(xa);hv=i(uy,"FlaxLogitsProcessor"),uy.forEach(r),gv=i(a$," enforcing a min-length by setting EOS probability to 0."),a$.forEach(r),uv=l(qi),Ea=n(qi,"DIV",{class:!0});var _y=s(Ea);h(Co.$$.fragment,_y),_y.forEach(r),qi.forEach(r),cp=l(e),ot=n(e,"H2",{class:!0});var Zp=s(ot);qt=n(Zp,"A",{id:!0,class:!0,href:!0});var vy=s(qt);pl=n(vy,"SPAN",{});var by=s(pl);h(No.$$.fragment,by),by.forEach(r),vy.forEach(r),_v=l(Zp),fl=n(Zp,"SPAN",{});var $y=s(fl);vv=i($y,"StoppingCriteria"),$y.forEach(r),Zp.forEach(r),lp=l(e),Bt=n(e,"P",{});var ef=s(Bt);bv=i(ef,"A "),Pa=n(ef,"A",{href:!0});var Ty=s(Pa);$v=i(Ty,"StoppingCriteria"),Ty.forEach(r),Tv=i(ef," can be used to change when to stop generation (other than EOS token)."),ef.forEach(r),dp=l(e),Pe=n(e,"DIV",{class:!0});var Bi=s(Pe);h(Wo.$$.fragment,Bi),yv=l(Bi),ml=n(Bi,"P",{});var yy=s(ml);kv=i(yy,"Abstract base 
class for all stopping criteria that can be applied during generation."),yy.forEach(r),wv=l(Bi),Fa=n(Bi,"DIV",{class:!0});var ky=s(Fa);h(Vo.$$.fragment,ky),ky.forEach(r),Bi.forEach(r),pp=l(e),nt=n(e,"DIV",{class:!0});var tf=s(nt);h(Mo.$$.fragment,tf),Lv=l(tf),Da=n(tf,"DIV",{class:!0});var wy=s(Da);h(Go.$$.fragment,wy),wy.forEach(r),tf.forEach(r),fp=l(e),Fe=n(e,"DIV",{class:!0});var Ii=s(Fe);h(jo.$$.fragment,Ii),xv=l(Ii),Ho=n(Ii,"P",{});var rf=s(Ho);Ev=i(rf,"This class can be used to stop generation whenever the full generated number of tokens exceeds "),hl=n(rf,"CODE",{});var Ly=s(hl);Pv=i(Ly,"max_length"),Ly.forEach(r),Fv=i(rf,`. Keep in mind for decoder-only type of transformers, this will include the initial prompted tokens.`),rf.forEach(r),Dv=l(Ii),za=n(Ii,"DIV",{class:!0});var xy=s(za);h(Ro.$$.fragment,xy),xy.forEach(r),Ii.forEach(r),mp=l(e),De=n(e,"DIV",{class:!0});var Ai=s(De);h(Ko.$$.fragment,Ai),zv=l(Ai),Uo=n(Ai,"P",{});var of=s(Uo);Sv=i(of,`This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the time will start being counted when you initialize this function. 
You can override this by passing an `),gl=n(of,"CODE",{});var Ey=s(gl);Ov=i(Ey,"initial_time"),Ey.forEach(r),qv=i(of,"."),of.forEach(r),Bv=l(Ai),Sa=n(Ai,"DIV",{class:!0});var Py=s(Sa);h(Yo.$$.fragment,Py),Py.forEach(r),Ai.forEach(r),hp=l(e),st=n(e,"H2",{class:!0});var nf=s(st);It=n(nf,"A",{id:!0,class:!0,href:!0});var Fy=s(It);ul=n(Fy,"SPAN",{});var Dy=s(ul);h(Xo.$$.fragment,Dy),Dy.forEach(r),Fy.forEach(r),Iv=l(nf),_l=n(nf,"SPAN",{});var zy=s(_l);Av=i(zy,"Constraints"),zy.forEach(r),nf.forEach(r),gp=l(e),At=n(e,"P",{});var sf=s(At);Cv=i(sf,"A "),Oa=n(sf,"A",{href:!0});var Sy=s(Oa);Nv=i(Sy,"Constraint"),Sy.forEach(r),Wv=i(sf," can be used to force the generation to include specific tokens or sequences in the output."),sf.forEach(r),up=l(e),$=n(e,"DIV",{class:!0});var T=s($);h(Jo.$$.fragment,T),Vv=l(T),vl=n(T,"P",{});var Oy=s(vl);Mv=i(Oy,`Abstract base class for all constraints that can be applied during generation. It must define how the constraint can be satisfied.`),Oy.forEach(r),Gv=l(T),bl=n(T,"P",{});var qy=s(bl);jv=i(qy,"All classes that inherit Constraint must follow the requirement that"),qy.forEach(r),Hv=l(T),h(Qo.$$.fragment,T),Rv=l(T),$l=n(T,"P",{});var By=s($l);Kv=i(By,"will always terminate (halt)."),By.forEach(r),Uv=l(T),Ct=n(T,"DIV",{class:!0});var af=s(Ct);h(Zo.$$.fragment,af),Yv=l(af),Tl=n(af,"P",{});var Iy=s(Tl);Xv=i(Iy,"When called, returns the token that would take this constraint one step closer to being fulfilled."),Iy.forEach(r),af.forEach(r),Jv=l(T),Nt=n(T,"DIV",{class:!0});var cf=s(Nt);h(en.$$.fragment,cf),Qv=l(cf),yl=n(cf,"P",{});var Ay=s(yl);Zv=i(Ay,"Creates a new instance of this constraint."),Ay.forEach(r),cf.forEach(r),eb=l(T),Wt=n(T,"DIV",{class:!0});var lf=s(Wt);h(tn.$$.fragment,lf),tb=l(lf),kl=n(lf,"P",{});var Cy=s(kl);rb=i(Cy,"Reads in a token and returns whether it creates progress."),Cy.forEach(r),lf.forEach(r),ob=l(T),Vt=n(T,"DIV",{class:!0});var df=s(Vt);h(rn.$$.fragment,df),nb=l(df),on=n(df,"P",{});var pf=s(on);sb=i(pf,"Returns 
the number of remaining steps of "),wl=n(pf,"CODE",{});var Ny=s(wl);ab=i(Ny,"advance()"),Ny.forEach(r),ib=i(pf," in order to complete this constraint."),pf.forEach(r),df.forEach(r),cb=l(T),Mt=n(T,"DIV",{class:!0});var ff=s(Mt);h(nn.$$.fragment,ff),lb=l(ff),Ll=n(ff,"P",{});var Wy=s(Ll);db=i(Wy,`Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of a constraint is abrupted by an unwanted token.`),Wy.forEach(r),ff.forEach(r),pb=l(T),Gt=n(T,"DIV",{class:!0});var mf=s(Gt);h(sn.$$.fragment,mf),fb=l(mf),xl=n(mf,"P",{});var Vy=s(xl);mb=i(Vy,"Tests whether this constraint has been properly defined."),Vy.forEach(r),mf.forEach(r),hb=l(T),Ce=n(T,"DIV",{class:!0});var Ci=s(Ce);h(an.$$.fragment,Ci),gb=l(Ci),cn=n(Ci,"P",{});var hf=s(cn);ub=i(hf,`Reads in a token and returns booleans that indicate the progress made by it. This function will update the state of this object unlikes `),El=n(hf,"CODE",{});var My=s(El);_b=i(My,"does_advance(self, token_id: int)"),My.forEach(r),vb=i(hf,"."),hf.forEach(r),bb=l(Ci),Pl=n(Ci,"P",{});var Gy=s(Pl);$b=i(Gy,`This isn\u2019t to test whether a certain token will advance the progress; it\u2019s to update its state as if it has been generated. 
This becomes important if token_id != desired token (refer to else statement in PhrasalConstraint)`),Gy.forEach(r),Ci.forEach(r),T.forEach(r),_p=l(e),at=n(e,"DIV",{class:!0});var gf=s(at);h(ln.$$.fragment,gf),Tb=l(gf),qa=n(gf,"P",{});var i$=s(qa);Ba=n(i$,"A",{href:!0});var jy=s(Ba);yb=i(jy,"Constraint"),jy.forEach(r),kb=i(i$," enforcing that an ordered sequence of tokens is included in the output."),i$.forEach(r),gf.forEach(r),vp=l(e),it=n(e,"DIV",{class:!0});var uf=s(it);h(dn.$$.fragment,uf),wb=l(uf),pn=n(uf,"P",{});var _f=s(pn);Lb=i(_f,"A special "),Ia=n(_f,"A",{href:!0});var Hy=s(Ia);xb=i(Hy,"Constraint"),Hy.forEach(r),Eb=i(_f," that is fulfilled by fulfilling just one of several constraints."),_f.forEach(r),uf.forEach(r),bp=l(e),I=n(e,"DIV",{class:!0});var Yt=s(I);h(fn.$$.fragment,Yt),Pb=l(Yt),Fl=n(Yt,"P",{});var Ry=s(Fl);Fb=i(Ry,"A class for beam scorers to track its progress through a list of constraints."),Ry.forEach(r),Db=l(Yt),E=n(Yt,"DIV",{class:!0});var G=s(E);h(mn.$$.fragment,G),zb=l(G),Dl=n(G,"P",{});var Ky=s(Dl);Sb=i(Ky,`The list of tokens to generate such that we can make progress. 
By \u201Clist\u201D we don\u2019t mean the list of token that will fully fulfill a constraint.`),Ky.forEach(r),Ob=l(G),ct=n(G,"P",{});var Ni=s(ct);qb=i(Ni,"Given constraints "),zl=n(Ni,"CODE",{});var Uy=s(zl);Bb=i(Uy,"c_i = {t_ij | j == # of tokens}"),Uy.forEach(r),Ib=i(Ni,`, If we\u2019re not in the middle of progressing through a specific constraint `),Sl=n(Ni,"CODE",{});var Yy=s(Sl);Ab=i(Yy,"c_i"),Yy.forEach(r),Cb=i(Ni,", we return:"),Ni.forEach(r),Nb=l(G),Ol=n(G,"P",{});var Xy=s(Ol);ql=n(Xy,"CODE",{});var Jy=s(ql);Wb=i(Jy,"[t_k1 for k in indices of unfulfilled constraints]"),Jy.forEach(r),Xy.forEach(r),Vb=l(G),ze=n(G,"P",{});var Xt=s(ze);Mb=i(Xt,`If we are in the middle of a constraint, then we return: `),Bl=n(Xt,"CODE",{});var Qy=s(Bl);Gb=i(Qy,"[t_ij]"),Qy.forEach(r),jb=i(Xt,", where "),Il=n(Xt,"CODE",{});var Zy=s(Il);Hb=i(Zy,"i"),Zy.forEach(r),Rb=i(Xt," is the index of the inprogress constraint, "),Al=n(Xt,"CODE",{});var e2=s(Al);Kb=i(e2,"j"),e2.forEach(r),Ub=i(Xt," is the next step for the constraint."),Xt.forEach(r),Yb=l(G),Cl=n(G,"P",{});var t2=s(Cl);Xb=i(t2,`Though we don\u2019t care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint, that\u2019s the only one we\u2019ll return.`),t2.forEach(r),G.forEach(r),Jb=l(Yt),jt=n(Yt,"DIV",{class:!0});var vf=s(jt);h(hn.$$.fragment,vf),Qb=l(vf),Nl=n(vf,"P",{});var r2=s(Nl);Zb=i(r2,"token_ids: the tokens generated thus far to reset the state of the progress through constraints."),r2.forEach(r),vf.forEach(r),Yt.forEach(r),$p=l(e),lt=n(e,"H2",{class:!0});var bf=s(lt);Ht=n(bf,"A",{id:!0,class:!0,href:!0});var o2=s(Ht);Wl=n(o2,"SPAN",{});var n2=s(Wl);h(gn.$$.fragment,n2),n2.forEach(r),o2.forEach(r),e1=l(bf),Vl=n(bf,"SPAN",{});var s2=s(Vl);t1=i(s2,"BeamSearch"),s2.forEach(r),bf.forEach(r),Tp=l(e),A=n(e,"DIV",{class:!0});var Jt=s(A);h(un.$$.fragment,Jt),r1=l(Jt),dt=n(Jt,"P",{});var Wi=s(dt);o1=i(Wi,"Abstract base class for all beam scorers that are used for 
"),Aa=n(Wi,"A",{href:!0});var a2=s(Aa);n1=i(a2,"beam_search()"),a2.forEach(r),s1=i(Wi,` and `),Ca=n(Wi,"A",{href:!0});var i2=s(Ca);a1=i(i2,"beam_sample()"),i2.forEach(r),i1=i(Wi,"."),Wi.forEach(r),c1=l(Jt),Na=n(Jt,"DIV",{class:!0});var c2=s(Na);h(_n.$$.fragment,c2),c2.forEach(r),l1=l(Jt),Wa=n(Jt,"DIV",{class:!0});var l2=s(Wa);h(vn.$$.fragment,l2),l2.forEach(r),Jt.forEach(r),yp=l(e),x=n(e,"DIV",{class:!0});var j=s(x);h(bn.$$.fragment,j),d1=l(j),Va=n(j,"P",{});var c$=s(Va);Ma=n(c$,"A",{href:!0});var d2=s(Ma);p1=i(d2,"BeamScorer"),d2.forEach(r),f1=i(c$," implementing standard beam search decoding."),c$.forEach(r),m1=l(j),$n=n(j,"P",{});var $f=s($n);h1=i($f,"Adapted in part from "),Tn=n($f,"A",{href:!0,rel:!0});var p2=s(Tn);g1=i(p2,`Facebook\u2019s XLM beam search code`),p2.forEach(r),u1=i($f,"."),$f.forEach(r),_1=l(j),Ga=n(j,"P",{});var l$=s(Ga);v1=i(l$,"Reference for the diverse beam search algorithm and implementation "),yn=n(l$,"A",{href:!0,rel:!0});var f2=s(yn);b1=i(f2,`Ashwin Kalyan\u2019s DBS implementation`),f2.forEach(r),l$.forEach(r),$1=l(j),ja=n(j,"DIV",{class:!0});var m2=s(ja);h(kn.$$.fragment,m2),m2.forEach(r),T1=l(j),Ha=n(j,"DIV",{class:!0});var h2=s(Ha);h(wn.$$.fragment,h2),h2.forEach(r),j.forEach(r),kp=l(e),C=n(e,"DIV",{class:!0});var Qt=s(C);h(Ln.$$.fragment,Qt),y1=l(Qt),Ra=n(Qt,"P",{});var d$=s(Ra);Ka=n(d$,"A",{href:!0});var g2=s(Ka);k1=i(g2,"BeamScorer"),g2.forEach(r),w1=i(d$," implementing constrained beam search decoding."),d$.forEach(r),L1=l(Qt),Ua=n(Qt,"DIV",{class:!0});var u2=s(Ua);h(xn.$$.fragment,u2),u2.forEach(r),x1=l(Qt),Ya=n(Qt,"DIV",{class:!0});var _2=s(Ya);h(En.$$.fragment,_2),_2.forEach(r),Qt.forEach(r),wp=l(e),pt=n(e,"H2",{class:!0});var Tf=s(pt);Rt=n(Tf,"A",{id:!0,class:!0,href:!0});var v2=s(Rt);Ml=n(v2,"SPAN",{});var b2=s(Ml);h(Pn.$$.fragment,b2),b2.forEach(r),v2.forEach(r),E1=l(Tf),Gl=n(Tf,"SPAN",{});var $2=s(Gl);P1=i($2,"Utilities"),$2.forEach(r),Tf.forEach(r),Lp=l(e),Se=n(e,"DIV",{class:!0});var 
Vi=s(Se);h(Fn.$$.fragment,Vi),F1=l(Vi),jl=n(Vi,"P",{});var T2=s(jl);D1=i(T2,"Filter a distribution of logits using top-k and/or nucleus (top-p) filtering"),T2.forEach(r),z1=l(Vi),Xa=n(Vi,"P",{});var p$=s(Xa);S1=i(p$,"From: "),Dn=n(p$,"A",{href:!0,rel:!0});var y2=s(Dn);O1=i(y2,"https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),y2.forEach(r),p$.forEach(r),Vi.forEach(r),xp=l(e),Oe=n(e,"DIV",{class:!0});var Mi=s(Oe);h(zn.$$.fragment,Mi),q1=l(Mi),Hl=n(Mi,"P",{});var k2=s(Hl);B1=i(k2,"Filter a distribution of logits using top-k and/or nucleus (top-p) filtering"),k2.forEach(r),I1=l(Mi),Ja=n(Mi,"P",{});var f$=s(Ja);A1=i(f$,"From: "),Sn=n(f$,"A",{href:!0,rel:!0});var w2=s(Sn);C1=i(w2,"https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),w2.forEach(r),f$.forEach(r),Mi.forEach(r),this.h()},h(){d(Be,"name","hf:doc:metadata"),d(Be,"content",JSON.stringify(z2)),d(H,"id","utilities-for-generation"),d(H,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(H,"href","#utilities-for-generation"),d(N,"class","relative 
group"),d(Bn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),d(In,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search"),d(An,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample"),d(Cn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search"),d(Nn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample"),d(Wn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search"),d(Vn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search"),d(mt,"id","generate-outputs"),d(mt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(mt,"href","#generate-outputs"),d(Ge,"class","relative group"),d(Gn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),d(jn,"href","/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput"),d(Hn,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate"),d(Kn,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput"),d(gt,"id","transformers.generation_utils.GreedySearchDecoderOnlyOutput"),d(gt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),d(gt,"href","#transformers.generation_utils.GreedySearchDecoderOnlyOutput"),d(je,"class","relative group"),d(He,"class","docstring"),d(Re,"class","docstring"),d(ut,"class","docstring"),d(R,"class","docstring"),d(_t,"id","transformers.generation_utils.SampleDecoderOnlyOutput"),d(_t,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(_t,"href","#transformers.generation_utils.SampleDecoderOnlyOutput"),d(Ke,"class","relative group"),d(Ue,"class","docstring"),d(Ye,"class","docstring"),d(vt,"class","docstring"),d(K,"class","docstring"),d(bt,"id","transformers.generation_utils.BeamSearchDecoderOnlyOutput"),d(bt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(bt,"href","#transformers.generation_utils.BeamSearchDecoderOnlyOutput"),d(Xe,"class","relative group"),d(Je,"class","docstring"),d(Qe,"class","docstring"),d($t,"id","transformers.generation_utils.BeamSampleDecoderOnlyOutput"),d($t,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d($t,"href","#transformers.generation_utils.BeamSampleDecoderOnlyOutput"),d(Ze,"class","relative group"),d(et,"class","docstring"),d(tt,"class","docstring"),d(Tt,"id","transformers.LogitsProcessor"),d(Tt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Tt,"href","#transformers.LogitsProcessor"),d(rt,"class","relative 
group"),d(Zn,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(kt,"class","docstring"),d(U,"class","docstring"),d(es,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(ts,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper"),d(rs,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(os,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper"),d(ns,"class","docstring"),d(Y,"class","docstring"),d(wt,"class","docstring"),d(X,"class","docstring"),d(as,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(is,"class","docstring"),d(J,"class","docstring"),d(ls,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper"),d(ds,"class","docstring"),d(Q,"class","docstring"),d(fs,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(ms,"class","docstring"),d(Z,"class","docstring"),d(gs,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper"),d(us,"class","docstring"),d(ee,"class","docstring"),d(vs,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper"),d(bs,"class","docstring"),d(te,"class","docstring"),d($s,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(Ar,"href","https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345"),d(Ar,"rel","nofollow"),d(Ts,"class","docstring"),d(re,"class","docstring"),d(ks,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(ws,"class","docstring"),d(oe,"class","docstring"),d(Ls,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(Mr,"href","https://arxiv
.org/abs/2010.00904"),d(Mr,"rel","nofollow"),d(xs,"class","docstring"),d(ne,"class","docstring"),d(Es,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(Ps,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search"),d(Hr,"href","https://arxiv.org/pdf/1610.02424.pdf"),d(Hr,"rel","nofollow"),d(Fs,"class","docstring"),d(se,"class","docstring"),d(zs,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(Ss,"class","docstring"),d(ae,"class","docstring"),d(Os,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(qs,"class","docstring"),d(ie,"class","docstring"),d(Bs,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor"),d(Is,"class","docstring"),d(ce,"class","docstring"),d(Pt,"class","docstring"),d(le,"class","docstring"),d(As,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(Cs,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(Ns,"class","docstring"),d(de,"class","docstring"),d(Ft,"class","docstring"),d(pe,"class","docstring"),d(Vs,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsWarper"),d(Ms,"class","docstring"),d(fe,"class","docstring"),d(js,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsWarper"),d(Hs,"class","docstring"),d(me,"class","docstring"),d(Ks,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsWarper"),d(Us,"class","docstring"),d(he,"class","docstring"),d(Xs,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(Js,"class","docstring"),d(ge,"class","docstring"),d(Zs,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor"),d
(ea,"class","docstring"),d(ue,"class","docstring"),d(ta,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(_o,"href","https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345"),d(_o,"rel","nofollow"),d(ra,"class","docstring"),d(_e,"class","docstring"),d(na,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor"),d(sa,"class","docstring"),d(ve,"class","docstring"),d(zt,"class","docstring"),d(be,"class","docstring"),d(aa,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsProcessor"),d(ia,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsWarper"),d(ca,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsProcessor"),d(la,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsWarper"),d(da,"class","docstring"),d($e,"class","docstring"),d(St,"class","docstring"),d(Te,"class","docstring"),d(fa,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsWarper"),d(ma,"class","docstring"),d(ye,"class","docstring"),d(ga,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsWarper"),d(ua,"class","docstring"),d(ke,"class","docstring"),d(va,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsWarper"),d(ba,"class","docstring"),d(we,"class","docstring"),d(Ta,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsProcessor"),d(ya,"class","docstring"),d(Le,"class","docstring"),d(ka,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsProcessor"),d(wa,"class","docstring"),d(xe,"class","docstring"),d(xa,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsProcessor"),d(Ea,"class","docstring"),d(Ee,"cl
ass","docstring"),d(qt,"id","transformers.StoppingCriteria"),d(qt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(qt,"href","#transformers.StoppingCriteria"),d(ot,"class","relative group"),d(Pa,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria"),d(Fa,"class","docstring"),d(Pe,"class","docstring"),d(Da,"class","docstring"),d(nt,"class","docstring"),d(za,"class","docstring"),d(Fe,"class","docstring"),d(Sa,"class","docstring"),d(De,"class","docstring"),d(It,"id","transformers.Constraint"),d(It,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(It,"href","#transformers.Constraint"),d(st,"class","relative group"),d(Oa,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.Constraint"),d(Ct,"class","docstring"),d(Nt,"class","docstring"),d(Wt,"class","docstring"),d(Vt,"class","docstring"),d(Mt,"class","docstring"),d(Gt,"class","docstring"),d(Ce,"class","docstring"),d($,"class","docstring"),d(Ba,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.Constraint"),d(at,"class","docstring"),d(Ia,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.Constraint"),d(it,"class","docstring"),d(E,"class","docstring"),d(jt,"class","docstring"),d(I,"class","docstring"),d(Ht,"id","transformers.BeamScorer"),d(Ht,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ht,"href","#transformers.BeamScorer"),d(lt,"class","relative 
group"),d(Aa,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search"),d(Ca,"href","/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample"),d(Na,"class","docstring"),d(Wa,"class","docstring"),d(A,"class","docstring"),d(Ma,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer"),d(Tn,"href","https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529"),d(Tn,"rel","nofollow"),d(yn,"href","https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua"),d(yn,"rel","nofollow"),d(ja,"class","docstring"),d(Ha,"class","docstring"),d(x,"class","docstring"),d(Ka,"href","/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer"),d(Ua,"class","docstring"),d(Ya,"class","docstring"),d(C,"class","docstring"),d(Rt,"id","transformers.top_k_top_p_filtering"),d(Rt,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Rt,"href","#transformers.top_k_top_p_filtering"),d(pt,"class","relative 
group"),d(Dn,"href","https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),d(Dn,"rel","nofollow"),d(Se,"class","docstring"),d(Sn,"href","https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317"),d(Sn,"rel","nofollow"),d(Oe,"class","docstring")},m(e,p){t(document.head,Be),f(e,qn,p),f(e,N,p),t(N,H),t(H,Gi),g(Zt,Gi,null),t(N,kf),t(N,ji),t(ji,wf),f(e,Jl,p),f(e,y,p),t(y,Lf),t(y,Bn),t(Bn,xf),t(y,Ef),t(y,In),t(In,Pf),t(y,Ff),t(y,An),t(An,Df),t(y,zf),t(y,Cn),t(Cn,Sf),t(y,Of),t(y,Nn),t(Nn,qf),t(y,Bf),t(y,Wn),t(Wn,If),t(y,Af),t(y,Vn),t(Vn,Cf),t(y,Nf),f(e,Ql,p),f(e,Mn,p),t(Mn,Wf),f(e,Zl,p),f(e,Ge,p),t(Ge,mt),t(mt,Hi),g(er,Hi,null),t(Ge,Vf),t(Ge,Ri),t(Ri,Mf),f(e,ed,p),f(e,W,p),t(W,Gf),t(W,Gn),t(Gn,jf),t(W,Hf),t(W,jn),t(jn,Rf),t(W,Kf),t(W,Hn),t(Hn,Uf),t(W,Yf),f(e,td,p),f(e,Rn,p),t(Rn,Xf),f(e,rd,p),g(tr,e,p),f(e,od,p),f(e,Ie,p),t(Ie,Jf),t(Ie,Ki),t(Ki,Qf),t(Ie,Zf),t(Ie,Kn),t(Kn,em),t(Ie,tm),f(e,nd,p),f(e,V,p),t(V,Un),t(Un,Ui),t(Ui,rm),t(Un,om),t(V,nm),t(V,Yn),t(Yn,Yi),t(Yi,sm),t(Yn,am),t(V,im),t(V,Xn),t(Xn,Xi),t(Xi,cm),t(Xn,lm),t(V,dm),t(V,Jn),t(Jn,Ji),t(Ji,pm),t(Jn,fm),f(e,sd,p),f(e,k,p),t(k,mm),t(k,Qi),t(Qi,hm),t(k,gm),t(k,Zi),t(Zi,um),t(k,_m),t(k,ec),t(ec,vm),t(k,bm),t(k,tc),t(tc,$m),t(k,Tm),t(k,rc),t(rc,ym),t(k,km),t(k,oc),t(oc,wm),t(k,Lm),f(e,ad,p),f(e,F,p),t(F,xm),t(F,nc),t(nc,Em),t(F,Pm),t(F,sc),t(sc,Fm),t(F,Dm),t(F,ac),t(ac,zm),t(F,Sm),t(F,ic),t(ic,Om),t(F,qm),f(e,id,p),f(e,D,p),t(D,Bm),t(D,cc),t(cc,Im),t(D,Am),t(D,lc),t(lc,Cm),t(D,Nm),t(D,dc),t(dc,Wm),t(D,Vm),t(D,pc),t(pc,Mm),t(D,Gm),f(e,cd,p),g(rr,e,p),f(e,ld,p),f(e,ht,p),t(ht,jm),t(ht,fc),t(fc,Hm),t(ht,Rm),f(e,dd,p),f(e,z,p),t(z,Km),t(z,mc),t(mc,Um),t(z,Ym),t(z,hc),t(hc,Xm),t(z,Jm),t(z,gc),t(gc,Qm),t(z,Zm),t(z,uc),t(uc,eh),t(z,th),f(e,pd,p),f(e,Qn,p),t(Qn,rh),f(e,fd,p),f(e,je,p),t(je,gt),t(gt,_c),g(or,_c,null),t(je,oh),t(je,vc),t(vc,nh),f(e,md,p),f(e,He,p),g(nr,He,null),t(He,sh),t(He,bc),t(bc,ah),f(e,hd,p),f(e,Re,p),g(sr,Re,null),t(Re,ih),t(Re,$c),t($c,ch),f(e,gd,p),f(e,R,p),g(ar,R,null),t(R,lh),
t(R,Tc),t(Tc,dh),t(R,ph),t(R,ut),g(ir,ut,null),t(ut,fh),t(ut,yc),t(yc,mh),f(e,ud,p),f(e,Ke,p),t(Ke,_t),t(_t,kc),g(cr,kc,null),t(Ke,hh),t(Ke,wc),t(wc,gh),f(e,_d,p),f(e,Ue,p),g(lr,Ue,null),t(Ue,uh),t(Ue,Lc),t(Lc,_h),f(e,vd,p),f(e,Ye,p),g(dr,Ye,null),t(Ye,vh),t(Ye,xc),t(xc,bh),f(e,bd,p),f(e,K,p),g(pr,K,null),t(K,$h),t(K,Ec),t(Ec,Th),t(K,yh),t(K,vt),g(fr,vt,null),t(vt,kh),t(vt,Pc),t(Pc,wh),f(e,$d,p),f(e,Xe,p),t(Xe,bt),t(bt,Fc),g(mr,Fc,null),t(Xe,Lh),t(Xe,Dc),t(Dc,xh),f(e,Td,p),f(e,Je,p),g(hr,Je,null),t(Je,Eh),t(Je,zc),t(zc,Ph),f(e,yd,p),f(e,Qe,p),g(gr,Qe,null),t(Qe,Fh),t(Qe,Sc),t(Sc,Dh),f(e,kd,p),f(e,Ze,p),t(Ze,$t),t($t,Oc),g(ur,Oc,null),t(Ze,zh),t(Ze,qc),t(qc,Sh),f(e,wd,p),f(e,et,p),g(_r,et,null),t(et,Oh),t(et,Bc),t(Bc,qh),f(e,Ld,p),f(e,tt,p),g(vr,tt,null),t(tt,Bh),t(tt,Ic),t(Ic,Ih),f(e,xd,p),f(e,rt,p),t(rt,Tt),t(Tt,Ac),g(br,Ac,null),t(rt,Ah),t(rt,Cc),t(Cc,Ch),f(e,Ed,p),f(e,yt,p),t(yt,Nh),t(yt,Zn),t(Zn,Wh),t(yt,Vh),f(e,Pd,p),f(e,U,p),g($r,U,null),t(U,Mh),t(U,Nc),t(Nc,Gh),t(U,jh),t(U,kt),g(Tr,kt,null),t(kt,Hh),t(kt,Wc),t(Wc,Rh),f(e,Fd,p),f(e,Y,p),g(yr,Y,null),t(Y,Kh),t(Y,w),t(w,Uh),t(w,es),t(es,Yh),t(w,Xh),t(w,ts),t(ts,Jh),t(w,Qh),t(w,Vc),t(Vc,Zh),t(w,eg),t(w,Mc),t(Mc,Gc),t(Gc,tg),t(w,rg),t(w,rs),t(rs,og),t(w,ng),t(w,os),t(os,sg),t(w,ag),t(Y,ig),t(Y,ns),g(kr,ns,null),f(e,Dd,p),f(e,X,p),g(wr,X,null),t(X,cg),t(X,jc),t(jc,lg),t(X,dg),t(X,wt),g(Lr,wt,null),t(wt,pg),t(wt,Hc),t(Hc,fg),f(e,zd,p),f(e,J,p),g(xr,J,null),t(J,mg),t(J,ss),t(ss,as),t(as,hg),t(ss,gg),t(J,ug),t(J,is),g(Er,is,null),f(e,Sd,p),f(e,Q,p),g(Pr,Q,null),t(Q,_g),t(Q,cs),t(cs,ls),t(ls,vg),t(cs,bg),t(Q,$g),t(Q,ds),g(Fr,ds,null),f(e,Od,p),f(e,Z,p),g(Dr,Z,null),t(Z,Tg),t(Z,ps),t(ps,fs),t(fs,yg),t(ps,kg),t(Z,wg),t(Z,ms),g(zr,ms,null),f(e,qd,p),f(e,ee,p),g(Sr,ee,null),t(ee,Lg),t(ee,hs),t(hs,gs),t(gs,xg),t(hs,Eg),t(ee,Pg),t(ee,us),g(Or,us,null),f(e,Bd,p),f(e,te,p),g(qr,te,null),t(te,Fg),t(te,_s),t(_s,vs),t(vs,Dg),t(_s,zg),t(te,Sg),t(te,bs),g(Br,bs,null),f(e,Id,p),f(e,re,p),g(Ir,re,null),t(re,Og),t(re,Lt),t(Lt,$s),t($s
,qg),t(Lt,Bg),t(Lt,Ar),t(Ar,Ig),t(Lt,Ag),t(re,Cg),t(re,Ts),g(Cr,Ts,null),f(e,Ad,p),f(e,oe,p),g(Nr,oe,null),t(oe,Ng),t(oe,ys),t(ys,ks),t(ks,Wg),t(ys,Vg),t(oe,Mg),t(oe,ws),g(Wr,ws,null),f(e,Cd,p),f(e,ne,p),g(Vr,ne,null),t(ne,Gg),t(ne,xt),t(xt,Ls),t(Ls,jg),t(xt,Hg),t(xt,Mr),t(Mr,Rg),t(xt,Kg),t(ne,Ug),t(ne,xs),g(Gr,xs,null),f(e,Nd,p),f(e,se,p),g(jr,se,null),t(se,Yg),t(se,Ae),t(Ae,Es),t(Es,Xg),t(Ae,Jg),t(Ae,Ps),t(Ps,Qg),t(Ae,Zg),t(Ae,Hr),t(Hr,eu),t(Ae,tu),t(se,ru),t(se,Fs),g(Rr,Fs,null),f(e,Wd,p),f(e,ae,p),g(Kr,ae,null),t(ae,ou),t(ae,Ds),t(Ds,zs),t(zs,nu),t(Ds,su),t(ae,au),t(ae,Ss),g(Ur,Ss,null),f(e,Vd,p),f(e,ie,p),g(Yr,ie,null),t(ie,iu),t(ie,Et),t(Et,Os),t(Os,cu),t(Et,lu),t(Et,Rc),t(Rc,du),t(Et,pu),t(ie,fu),t(ie,qs),g(Xr,qs,null),f(e,Md,p),f(e,ce,p),g(Jr,ce,null),t(ce,mu),t(ce,M),t(M,Bs),t(Bs,hu),t(M,gu),t(M,Kc),t(Kc,uu),t(M,_u),t(M,Uc),t(Uc,vu),t(M,bu),t(M,Yc),t(Yc,$u),t(M,Tu),t(ce,yu),t(ce,Is),g(Qr,Is,null),f(e,Gd,p),f(e,le,p),g(Zr,le,null),t(le,ku),t(le,Xc),t(Xc,wu),t(le,Lu),t(le,Pt),g(eo,Pt,null),t(Pt,xu),t(Pt,Jc),t(Jc,Eu),f(e,jd,p),f(e,de,p),g(to,de,null),t(de,Pu),t(de,B),t(B,Fu),t(B,As),t(As,Du),t(B,zu),t(B,Qc),t(Qc,Su),t(B,Ou),t(B,Zc),t(Zc,el),t(el,qu),t(B,Bu),t(B,Cs),t(Cs,Iu),t(B,Au),t(de,Cu),t(de,Ns),g(ro,Ns,null),f(e,Hd,p),f(e,pe,p),g(oo,pe,null),t(pe,Nu),t(pe,tl),t(tl,Wu),t(pe,Vu),t(pe,Ft),g(no,Ft,null),t(Ft,Mu),t(Ft,rl),t(rl,Gu),f(e,Rd,p),f(e,fe,p),g(so,fe,null),t(fe,ju),t(fe,Ws),t(Ws,Vs),t(Vs,Hu),t(Ws,Ru),t(fe,Ku),t(fe,Ms),g(ao,Ms,null),f(e,Kd,p),f(e,me,p),g(io,me,null),t(me,Uu),t(me,Gs),t(Gs,js),t(js,Yu),t(Gs,Xu),t(me,Ju),t(me,Hs),g(co,Hs,null),f(e,Ud,p),f(e,he,p),g(lo,he,null),t(he,Qu),t(he,Rs),t(Rs,Ks),t(Ks,Zu),t(Rs,e_),t(he,t_),t(he,Us),g(po,Us,null),f(e,Yd,p),f(e,ge,p),g(fo,ge,null),t(ge,r_),t(ge,Ys),t(Ys,Xs),t(Xs,o_),t(Ys,n_),t(ge,s_),t(ge,Js),g(mo,Js,null),f(e,Xd,p),f(e,ue,p),g(ho,ue,null),t(ue,a_),t(ue,Qs),t(Qs,Zs),t(Zs,i_),t(Qs,c_),t(ue,l_),t(ue,ea),g(go,ea,null),f(e,Jd,p),f(e,_e,p),g(uo,_e,null),t(_e,d_),t(_e,Dt),t(Dt,ta),t(ta,p_),t(Dt,f_),t(Dt,_o
),t(_o,m_),t(Dt,h_),t(_e,g_),t(_e,ra),g(vo,ra,null),f(e,Qd,p),f(e,ve,p),g(bo,ve,null),t(ve,u_),t(ve,oa),t(oa,na),t(na,__),t(oa,v_),t(ve,b_),t(ve,sa),g($o,sa,null),f(e,Zd,p),f(e,be,p),g(To,be,null),t(be,$_),t(be,ol),t(ol,T_),t(be,y_),t(be,zt),g(yo,zt,null),t(zt,k_),t(zt,nl),t(nl,w_),f(e,ep,p),f(e,$e,p),g(ko,$e,null),t($e,L_),t($e,L),t(L,x_),t(L,aa),t(aa,E_),t(L,P_),t(L,ia),t(ia,F_),t(L,D_),t(L,sl),t(sl,z_),t(L,S_),t(L,al),t(al,il),t(il,O_),t(L,q_),t(L,ca),t(ca,B_),t(L,I_),t(L,la),t(la,A_),t(L,C_),t($e,N_),t($e,da),g(wo,da,null),f(e,tp,p),f(e,Te,p),g(Lo,Te,null),t(Te,W_),t(Te,cl),t(cl,V_),t(Te,M_),t(Te,St),g(xo,St,null),t(St,G_),t(St,ll),t(ll,j_),f(e,rp,p),f(e,ye,p),g(Eo,ye,null),t(ye,H_),t(ye,pa),t(pa,fa),t(fa,R_),t(pa,K_),t(ye,U_),t(ye,ma),g(Po,ma,null),f(e,op,p),f(e,ke,p),g(Fo,ke,null),t(ke,Y_),t(ke,ha),t(ha,ga),t(ga,X_),t(ha,J_),t(ke,Q_),t(ke,ua),g(Do,ua,null),f(e,np,p),f(e,we,p),g(zo,we,null),t(we,Z_),t(we,_a),t(_a,va),t(va,ev),t(_a,tv),t(we,rv),t(we,ba),g(So,ba,null),f(e,sp,p),f(e,Le,p),g(Oo,Le,null),t(Le,ov),t(Le,$a),t($a,Ta),t(Ta,nv),t($a,sv),t(Le,av),t(Le,ya),g(qo,ya,null),f(e,ap,p),f(e,xe,p),g(Bo,xe,null),t(xe,iv),t(xe,Ot),t(Ot,ka),t(ka,cv),t(Ot,lv),t(Ot,dl),t(dl,dv),t(Ot,pv),t(xe,fv),t(xe,wa),g(Io,wa,null),f(e,ip,p),f(e,Ee,p),g(Ao,Ee,null),t(Ee,mv),t(Ee,La),t(La,xa),t(xa,hv),t(La,gv),t(Ee,uv),t(Ee,Ea),g(Co,Ea,null),f(e,cp,p),f(e,ot,p),t(ot,qt),t(qt,pl),g(No,pl,null),t(ot,_v),t(ot,fl),t(fl,vv),f(e,lp,p),f(e,Bt,p),t(Bt,bv),t(Bt,Pa),t(Pa,$v),t(Bt,Tv),f(e,dp,p),f(e,Pe,p),g(Wo,Pe,null),t(Pe,yv),t(Pe,ml),t(ml,kv),t(Pe,wv),t(Pe,Fa),g(Vo,Fa,null),f(e,pp,p),f(e,nt,p),g(Mo,nt,null),t(nt,Lv),t(nt,Da),g(Go,Da,null),f(e,fp,p),f(e,Fe,p),g(jo,Fe,null),t(Fe,xv),t(Fe,Ho),t(Ho,Ev),t(Ho,hl),t(hl,Pv),t(Ho,Fv),t(Fe,Dv),t(Fe,za),g(Ro,za,null),f(e,mp,p),f(e,De,p),g(Ko,De,null),t(De,zv),t(De,Uo),t(Uo,Sv),t(Uo,gl),t(gl,Ov),t(Uo,qv),t(De,Bv),t(De,Sa),g(Yo,Sa,null),f(e,hp,p),f(e,st,p),t(st,It),t(It,ul),g(Xo,ul,null),t(st,Iv),t(st,_l),t(_l,Av),f(e,gp,p),f(e,At,p),t(At,Cv),t(At,Oa),t(O
a,Nv),t(At,Wv),f(e,up,p),f(e,$,p),g(Jo,$,null),t($,Vv),t($,vl),t(vl,Mv),t($,Gv),t($,bl),t(bl,jv),t($,Hv),g(Qo,$,null),t($,Rv),t($,$l),t($l,Kv),t($,Uv),t($,Ct),g(Zo,Ct,null),t(Ct,Yv),t(Ct,Tl),t(Tl,Xv),t($,Jv),t($,Nt),g(en,Nt,null),t(Nt,Qv),t(Nt,yl),t(yl,Zv),t($,eb),t($,Wt),g(tn,Wt,null),t(Wt,tb),t(Wt,kl),t(kl,rb),t($,ob),t($,Vt),g(rn,Vt,null),t(Vt,nb),t(Vt,on),t(on,sb),t(on,wl),t(wl,ab),t(on,ib),t($,cb),t($,Mt),g(nn,Mt,null),t(Mt,lb),t(Mt,Ll),t(Ll,db),t($,pb),t($,Gt),g(sn,Gt,null),t(Gt,fb),t(Gt,xl),t(xl,mb),t($,hb),t($,Ce),g(an,Ce,null),t(Ce,gb),t(Ce,cn),t(cn,ub),t(cn,El),t(El,_b),t(cn,vb),t(Ce,bb),t(Ce,Pl),t(Pl,$b),f(e,_p,p),f(e,at,p),g(ln,at,null),t(at,Tb),t(at,qa),t(qa,Ba),t(Ba,yb),t(qa,kb),f(e,vp,p),f(e,it,p),g(dn,it,null),t(it,wb),t(it,pn),t(pn,Lb),t(pn,Ia),t(Ia,xb),t(pn,Eb),f(e,bp,p),f(e,I,p),g(fn,I,null),t(I,Pb),t(I,Fl),t(Fl,Fb),t(I,Db),t(I,E),g(mn,E,null),t(E,zb),t(E,Dl),t(Dl,Sb),t(E,Ob),t(E,ct),t(ct,qb),t(ct,zl),t(zl,Bb),t(ct,Ib),t(ct,Sl),t(Sl,Ab),t(ct,Cb),t(E,Nb),t(E,Ol),t(Ol,ql),t(ql,Wb),t(E,Vb),t(E,ze),t(ze,Mb),t(ze,Bl),t(Bl,Gb),t(ze,jb),t(ze,Il),t(Il,Hb),t(ze,Rb),t(ze,Al),t(Al,Kb),t(ze,Ub),t(E,Yb),t(E,Cl),t(Cl,Xb),t(I,Jb),t(I,jt),g(hn,jt,null),t(jt,Qb),t(jt,Nl),t(Nl,Zb),f(e,$p,p),f(e,lt,p),t(lt,Ht),t(Ht,Wl),g(gn,Wl,null),t(lt,e1),t(lt,Vl),t(Vl,t1),f(e,Tp,p),f(e,A,p),g(un,A,null),t(A,r1),t(A,dt),t(dt,o1),t(dt,Aa),t(Aa,n1),t(dt,s1),t(dt,Ca),t(Ca,a1),t(dt,i1),t(A,c1),t(A,Na),g(_n,Na,null),t(A,l1),t(A,Wa),g(vn,Wa,null),f(e,yp,p),f(e,x,p),g(bn,x,null),t(x,d1),t(x,Va),t(Va,Ma),t(Ma,p1),t(Va,f1),t(x,m1),t(x,$n),t($n,h1),t($n,Tn),t(Tn,g1),t($n,u1),t(x,_1),t(x,Ga),t(Ga,v1),t(Ga,yn),t(yn,b1),t(x,$1),t(x,ja),g(kn,ja,null),t(x,T1),t(x,Ha),g(wn,Ha,null),f(e,kp,p),f(e,C,p),g(Ln,C,null),t(C,y1),t(C,Ra),t(Ra,Ka),t(Ka,k1),t(Ra,w1),t(C,L1),t(C,Ua),g(xn,Ua,null),t(C,x1),t(C,Ya),g(En,Ya,null),f(e,wp,p),f(e,pt,p),t(pt,Rt),t(Rt,Ml),g(Pn,Ml,null),t(pt,E1),t(pt,Gl),t(Gl,P1),f(e,Lp,p),f(e,Se,p),g(Fn,Se,null),t(Se,F1),t(Se,jl),t(jl,D1),t(Se,z1),t(Se,Xa),t(Xa,S1),t(Xa,Dn),t(Dn,O1)
,f(e,xp,p),f(e,Oe,p),g(zn,Oe,null),t(Oe,q1),t(Oe,Hl),t(Hl,B1),t(Oe,I1),t(Oe,Ja),t(Ja,A1),t(Ja,Sn),t(Sn,C1),Ep=!0},p:F2,i(e){Ep||(u(Zt.$$.fragment,e),u(er.$$.fragment,e),u(tr.$$.fragment,e),u(rr.$$.fragment,e),u(or.$$.fragment,e),u(nr.$$.fragment,e),u(sr.$$.fragment,e),u(ar.$$.fragment,e),u(ir.$$.fragment,e),u(cr.$$.fragment,e),u(lr.$$.fragment,e),u(dr.$$.fragment,e),u(pr.$$.fragment,e),u(fr.$$.fragment,e),u(mr.$$.fragment,e),u(hr.$$.fragment,e),u(gr.$$.fragment,e),u(ur.$$.fragment,e),u(_r.$$.fragment,e),u(vr.$$.fragment,e),u(br.$$.fragment,e),u($r.$$.fragment,e),u(Tr.$$.fragment,e),u(yr.$$.fragment,e),u(kr.$$.fragment,e),u(wr.$$.fragment,e),u(Lr.$$.fragment,e),u(xr.$$.fragment,e),u(Er.$$.fragment,e),u(Pr.$$.fragment,e),u(Fr.$$.fragment,e),u(Dr.$$.fragment,e),u(zr.$$.fragment,e),u(Sr.$$.fragment,e),u(Or.$$.fragment,e),u(qr.$$.fragment,e),u(Br.$$.fragment,e),u(Ir.$$.fragment,e),u(Cr.$$.fragment,e),u(Nr.$$.fragment,e),u(Wr.$$.fragment,e),u(Vr.$$.fragment,e),u(Gr.$$.fragment,e),u(jr.$$.fragment,e),u(Rr.$$.fragment,e),u(Kr.$$.fragment,e),u(Ur.$$.fragment,e),u(Yr.$$.fragment,e),u(Xr.$$.fragment,e),u(Jr.$$.fragment,e),u(Qr.$$.fragment,e),u(Zr.$$.fragment,e),u(eo.$$.fragment,e),u(to.$$.fragment,e),u(ro.$$.fragment,e),u(oo.$$.fragment,e),u(no.$$.fragment,e),u(so.$$.fragment,e),u(ao.$$.fragment,e),u(io.$$.fragment,e),u(co.$$.fragment,e),u(lo.$$.fragment,e),u(po.$$.fragment,e),u(fo.$$.fragment,e),u(mo.$$.fragment,e),u(ho.$$.fragment,e),u(go.$$.fragment,e),u(uo.$$.fragment,e),u(vo.$$.fragment,e),u(bo.$$.fragment,e),u($o.$$.fragment,e),u(To.$$.fragment,e),u(yo.$$.fragment,e),u(ko.$$.fragment,e),u(wo.$$.fragment,e),u(Lo.$$.fragment,e),u(xo.$$.fragment,e),u(Eo.$$.fragment,e),u(Po.$$.fragment,e),u(Fo.$$.fragment,e),u(Do.$$.fragment,e),u(zo.$$.fragment,e),u(So.$$.fragment,e),u(Oo.$$.fragment,e),u(qo.$$.fragment,e),u(Bo.$$.fragment,e),u(Io.$$.fragment,e),u(Ao.$$.fragment,e),u(Co.$$.fragment,e),u(No.$$.fragment,e),u(Wo.$$.fragment,e),u(Vo.$$.fragment,e),u(Mo.$$.fragment,e),u(Go.$$.fra
gment,e),u(jo.$$.fragment,e),u(Ro.$$.fragment,e),u(Ko.$$.fragment,e),u(Yo.$$.fragment,e),u(Xo.$$.fragment,e),u(Jo.$$.fragment,e),u(Qo.$$.fragment,e),u(Zo.$$.fragment,e),u(en.$$.fragment,e),u(tn.$$.fragment,e),u(rn.$$.fragment,e),u(nn.$$.fragment,e),u(sn.$$.fragment,e),u(an.$$.fragment,e),u(ln.$$.fragment,e),u(dn.$$.fragment,e),u(fn.$$.fragment,e),u(mn.$$.fragment,e),u(hn.$$.fragment,e),u(gn.$$.fragment,e),u(un.$$.fragment,e),u(_n.$$.fragment,e),u(vn.$$.fragment,e),u(bn.$$.fragment,e),u(kn.$$.fragment,e),u(wn.$$.fragment,e),u(Ln.$$.fragment,e),u(xn.$$.fragment,e),u(En.$$.fragment,e),u(Pn.$$.fragment,e),u(Fn.$$.fragment,e),u(zn.$$.fragment,e),Ep=!0)},o(e){_(Zt.$$.fragment,e),_(er.$$.fragment,e),_(tr.$$.fragment,e),_(rr.$$.fragment,e),_(or.$$.fragment,e),_(nr.$$.fragment,e),_(sr.$$.fragment,e),_(ar.$$.fragment,e),_(ir.$$.fragment,e),_(cr.$$.fragment,e),_(lr.$$.fragment,e),_(dr.$$.fragment,e),_(pr.$$.fragment,e),_(fr.$$.fragment,e),_(mr.$$.fragment,e),_(hr.$$.fragment,e),_(gr.$$.fragment,e),_(ur.$$.fragment,e),_(_r.$$.fragment,e),_(vr.$$.fragment,e),_(br.$$.fragment,e),_($r.$$.fragment,e),_(Tr.$$.fragment,e),_(yr.$$.fragment,e),_(kr.$$.fragment,e),_(wr.$$.fragment,e),_(Lr.$$.fragment,e),_(xr.$$.fragment,e),_(Er.$$.fragment,e),_(Pr.$$.fragment,e),_(Fr.$$.fragment,e),_(Dr.$$.fragment,e),_(zr.$$.fragment,e),_(Sr.$$.fragment,e),_(Or.$$.fragment,e),_(qr.$$.fragment,e),_(Br.$$.fragment,e),_(Ir.$$.fragment,e),_(Cr.$$.fragment,e),_(Nr.$$.fragment,e),_(Wr.$$.fragment,e),_(Vr.$$.fragment,e),_(Gr.$$.fragment,e),_(jr.$$.fragment,e),_(Rr.$$.fragment,e),_(Kr.$$.fragment,e),_(Ur.$$.fragment,e),_(Yr.$$.fragment,e),_(Xr.$$.fragment,e),_(Jr.$$.fragment,e),_(Qr.$$.fragment,e),_(Zr.$$.fragment,e),_(eo.$$.fragment,e),_(to.$$.fragment,e),_(ro.$$.fragment,e),_(oo.$$.fragment,e),_(no.$$.fragment,e),_(so.$$.fragment,e),_(ao.$$.fragment,e),_(io.$$.fragment,e),_(co.$$.fragment,e),_(lo.$$.fragment,e),_(po.$$.fragment,e),_(fo.$$.fragment,e),_(mo.$$.fragment,e),_(ho.$$.fragment,e),_(go.$$.fragment,e
),_(uo.$$.fragment,e),_(vo.$$.fragment,e),_(bo.$$.fragment,e),_($o.$$.fragment,e),_(To.$$.fragment,e),_(yo.$$.fragment,e),_(ko.$$.fragment,e),_(wo.$$.fragment,e),_(Lo.$$.fragment,e),_(xo.$$.fragment,e),_(Eo.$$.fragment,e),_(Po.$$.fragment,e),_(Fo.$$.fragment,e),_(Do.$$.fragment,e),_(zo.$$.fragment,e),_(So.$$.fragment,e),_(Oo.$$.fragment,e),_(qo.$$.fragment,e),_(Bo.$$.fragment,e),_(Io.$$.fragment,e),_(Ao.$$.fragment,e),_(Co.$$.fragment,e),_(No.$$.fragment,e),_(Wo.$$.fragment,e),_(Vo.$$.fragment,e),_(Mo.$$.fragment,e),_(Go.$$.fragment,e),_(jo.$$.fragment,e),_(Ro.$$.fragment,e),_(Ko.$$.fragment,e),_(Yo.$$.fragment,e),_(Xo.$$.fragment,e),_(Jo.$$.fragment,e),_(Qo.$$.fragment,e),_(Zo.$$.fragment,e),_(en.$$.fragment,e),_(tn.$$.fragment,e),_(rn.$$.fragment,e),_(nn.$$.fragment,e),_(sn.$$.fragment,e),_(an.$$.fragment,e),_(ln.$$.fragment,e),_(dn.$$.fragment,e),_(fn.$$.fragment,e),_(mn.$$.fragment,e),_(hn.$$.fragment,e),_(gn.$$.fragment,e),_(un.$$.fragment,e),_(_n.$$.fragment,e),_(vn.$$.fragment,e),_(bn.$$.fragment,e),_(kn.$$.fragment,e),_(wn.$$.fragment,e),_(Ln.$$.fragment,e),_(xn.$$.fragment,e),_(En.$$.fragment,e),_(Pn.$$.fragment,e),_(Fn.$$.fragment,e),_(zn.$$.fragment,e),Ep=!1},d(e){r(Be),e&&r(qn),e&&r(N),v(Zt),e&&r(Jl),e&&r(y),e&&r(Ql),e&&r(Mn),e&&r(Zl),e&&r(Ge),v(er),e&&r(ed),e&&r(W),e&&r(td),e&&r(Rn),e&&r(rd),v(tr,e),e&&r(od),e&&r(Ie),e&&r(nd),e&&r(V),e&&r(sd),e&&r(k),e&&r(ad),e&&r(F),e&&r(id),e&&r(D),e&&r(cd),v(rr,e),e&&r(ld),e&&r(ht),e&&r(dd),e&&r(z),e&&r(pd),e&&r(Qn),e&&r(fd),e&&r(je),v(or),e&&r(md),e&&r(He),v(nr),e&&r(hd),e&&r(Re),v(sr),e&&r(gd),e&&r(R),v(ar),v(ir),e&&r(ud),e&&r(Ke),v(cr),e&&r(_d),e&&r(Ue),v(lr),e&&r(vd),e&&r(Ye),v(dr),e&&r(bd),e&&r(K),v(pr),v(fr),e&&r($d),e&&r(Xe),v(mr),e&&r(Td),e&&r(Je),v(hr),e&&r(yd),e&&r(Qe),v(gr),e&&r(kd),e&&r(Ze),v(ur),e&&r(wd),e&&r(et),v(_r),e&&r(Ld),e&&r(tt),v(vr),e&&r(xd),e&&r(rt),v(br),e&&r(Ed),e&&r(yt),e&&r(Pd),e&&r(U),v($r),v(Tr),e&&r(Fd),e&&r(Y),v(yr),v(kr),e&&r(Dd),e&&r(X),v(wr),v(Lr),e&&r(zd),e&&r(J),v(xr),v(Er),e&&r(S
d),e&&r(Q),v(Pr),v(Fr),e&&r(Od),e&&r(Z),v(Dr),v(zr),e&&r(qd),e&&r(ee),v(Sr),v(Or),e&&r(Bd),e&&r(te),v(qr),v(Br),e&&r(Id),e&&r(re),v(Ir),v(Cr),e&&r(Ad),e&&r(oe),v(Nr),v(Wr),e&&r(Cd),e&&r(ne),v(Vr),v(Gr),e&&r(Nd),e&&r(se),v(jr),v(Rr),e&&r(Wd),e&&r(ae),v(Kr),v(Ur),e&&r(Vd),e&&r(ie),v(Yr),v(Xr),e&&r(Md),e&&r(ce),v(Jr),v(Qr),e&&r(Gd),e&&r(le),v(Zr),v(eo),e&&r(jd),e&&r(de),v(to),v(ro),e&&r(Hd),e&&r(pe),v(oo),v(no),e&&r(Rd),e&&r(fe),v(so),v(ao),e&&r(Kd),e&&r(me),v(io),v(co),e&&r(Ud),e&&r(he),v(lo),v(po),e&&r(Yd),e&&r(ge),v(fo),v(mo),e&&r(Xd),e&&r(ue),v(ho),v(go),e&&r(Jd),e&&r(_e),v(uo),v(vo),e&&r(Qd),e&&r(ve),v(bo),v($o),e&&r(Zd),e&&r(be),v(To),v(yo),e&&r(ep),e&&r($e),v(ko),v(wo),e&&r(tp),e&&r(Te),v(Lo),v(xo),e&&r(rp),e&&r(ye),v(Eo),v(Po),e&&r(op),e&&r(ke),v(Fo),v(Do),e&&r(np),e&&r(we),v(zo),v(So),e&&r(sp),e&&r(Le),v(Oo),v(qo),e&&r(ap),e&&r(xe),v(Bo),v(Io),e&&r(ip),e&&r(Ee),v(Ao),v(Co),e&&r(cp),e&&r(ot),v(No),e&&r(lp),e&&r(Bt),e&&r(dp),e&&r(Pe),v(Wo),v(Vo),e&&r(pp),e&&r(nt),v(Mo),v(Go),e&&r(fp),e&&r(Fe),v(jo),v(Ro),e&&r(mp),e&&r(De),v(Ko),v(Yo),e&&r(hp),e&&r(st),v(Xo),e&&r(gp),e&&r(At),e&&r(up),e&&r($),v(Jo),v(Qo),v(Zo),v(en),v(tn),v(rn),v(nn),v(sn),v(an),e&&r(_p),e&&r(at),v(ln),e&&r(vp),e&&r(it),v(dn),e&&r(bp),e&&r(I),v(fn),v(mn),v(hn),e&&r($p),e&&r(lt),v(gn),e&&r(Tp),e&&r(A),v(un),v(_n),v(vn),e&&r(yp),e&&r(x),v(bn),v(kn),v(wn),e&&r(kp),e&&r(C),v(Ln),v(xn),v(En),e&&r(wp),e&&r(pt),v(Pn),e&&r(Lp),e&&r(Se),v(Fn),e&&r(xp),e&&r(Oe),v(zn)}}}const z2={local:"utilities-for-generation",sections:[{local:"generate-outputs",sections:[{local:"transformers.generation_utils.GreedySearchDecoderOnlyOutput",title:"GreedySearchOutput"},{local:"transformers.generation_utils.SampleDecoderOnlyOutput",title:"SampleOutput"},{local:"transformers.generation_utils.BeamSearchDecoderOnlyOutput",title:"BeamSearchOutput"},{local:"transformers.generation_utils.BeamSampleDecoderOnlyOutput",title:"BeamSampleOutput"}],title:"Generate 
Outputs"},{local:"transformers.LogitsProcessor",title:"LogitsProcessor"},{local:"transformers.StoppingCriteria",title:"StoppingCriteria"},{local:"transformers.Constraint",title:"Constraints"},{local:"transformers.BeamScorer",title:"BeamSearch"},{local:"transformers.top_k_top_p_filtering",title:"Utilities"}],title:"Utilities for Generation"};function S2(yf,Be,qn){let{fw:N}=Be;return yf.$$set=H=>{"fw"in H&&qn(0,N=H.fw)},[N]}class C2 extends L2{constructor(Be){super();x2(this,Be,S2,D2,E2,{fw:0})}}export{C2 as default,z2 as metadata};
418
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/internal/pipelines_utils.mdx-885320a3.js
import{S as Qa,i as Ba,s as Ra,e as a,k as p,w as h,t as s,M as Ma,c as o,d as r,m as l,a as n,x as u,h as i,b as m,F as e,g as d,y as g,L as za,q as v,o as _,B as $}from"../../chunks/vendor-4833417e.js";import{D as b}from"../../chunks/Docstring-4f315ed9.js";import{I as Vt}from"../../chunks/IconCopyLink-4b81c553.js";function Za(Ht){let S,ve,D,y,Oe,Z,Nt,Ce,Wt,nt,_e,Jt,st,$e,qt,it,A,N,Ie,G,jt,Te,Ut,pt,k,K,Qt,X,Bt,Pe,Rt,Mt,lt,L,Y,zt,Ve,Zt,mt,E,ee,Gt,be,Kt,He,Xt,Yt,te,er,Ne,tr,rr,ct,O,W,We,re,ar,Je,or,dt,f,ae,nr,qe,sr,ir,C,je,pr,lr,Ue,mr,cr,Qe,dr,fr,J,Be,hr,ur,Re,gr,vr,_r,q,oe,$r,I,Pr,we,br,wr,Me,Dr,yr,Er,j,ne,Fr,se,xr,De,Sr,Ar,kr,U,ie,Lr,ze,Or,ft,F,pe,Cr,Ze,Ir,Tr,Q,le,Vr,me,Hr,ye,Nr,Wr,ht,x,ce,Jr,Ge,qr,jr,B,de,Ur,Ke,Qr,ut,w,fe,Br,Xe,Rr,Mr,Ye,zr,Zr,R,he,Gr,et,Kr,gt,T,M,tt,ue,Xr,rt,Yr,vt,V,ge,ea,H,ta,Ee,ra,aa,at,oa,na,_t;return Z=new Vt({}),G=new Vt({}),K=new b({props:{name:"class transformers.pipelines.ArgumentHandler",anchor:"transformers.pipelines.ArgumentHandler",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L394"}}),Y=new b({props:{name:"class transformers.pipelines.ZeroShotClassificationArgumentHandler",anchor:"transformers.pipelines.ZeroShotClassificationArgumentHandler",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/zero_shot_classification.py#L14"}}),ee=new b({props:{name:"class transformers.pipelines.QuestionAnsweringArgumentHandler",anchor:"transformers.pipelines.QuestionAnsweringArgumentHandler",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L32"}}),re=new Vt({}),ae=new b({props:{name:"class transformers.PipelineDataFormat",anchor:"transformers.PipelineDataFormat",parameters:[{name:"output_path",val:": typing.Optional[str]"},{name:"input_path",val:": typing.Optional[str]"},{name:"column",val:": typing.Optional[str]"},{name:"overwrite",val:": bool = 
False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L404",parametersDescription:[{anchor:"transformers.PipelineDataFormat.output_path",description:"<strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.",name:"output_path"},{anchor:"transformers.PipelineDataFormat.input_path",description:"<strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.",name:"input_path"},{anchor:"transformers.PipelineDataFormat.column",description:"<strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.",name:"column"},{anchor:"transformers.PipelineDataFormat.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.`,name:"overwrite"}]}}),oe=new b({props:{name:"from_str",anchor:"transformers.PipelineDataFormat.from_str",parameters:[{name:"format",val:": str"},{name:"output_path",val:": typing.Optional[str]"},{name:"input_path",val:": typing.Optional[str]"},{name:"column",val:": typing.Optional[str]"},{name:"overwrite",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L481",parametersDescription:[{anchor:"transformers.PipelineDataFormat.from_str.output_path",description:`<strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.`,name:"output_path"},{anchor:"transformers.PipelineDataFormat.from_str.input_path",description:`<strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.`,name:"input_path"},{anchor:"transformers.PipelineDataFormat.from_str.column",description:`<strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to 
read.`,name:"column"},{anchor:"transformers.PipelineDataFormat.from_str.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.`,name:"overwrite"}],returnDescription:` <p>The proper data format.</p> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.PipelineDataFormat" >PipelineDataFormat</a></p> `}}),ne=new b({props:{name:"save",anchor:"transformers.PipelineDataFormat.save",parameters:[{name:"data",val:": typing.Union[dict, typing.List[dict]]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L453",parametersDescription:[{anchor:"transformers.PipelineDataFormat.save.data",description:"<strong>data</strong> (<code>dict</code> or list of <code>dict</code>) &#x2014; The data to store.",name:"data"}]}}),ie=new b({props:{name:"save_binary",anchor:"transformers.PipelineDataFormat.save_binary",parameters:[{name:"data",val:": typing.Union[dict, typing.List[dict]]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L463",parametersDescription:[{anchor:"transformers.PipelineDataFormat.save_binary.data",description:"<strong>data</strong> (<code>dict</code> or list of <code>dict</code>) &#x2014; The data to store.",name:"data"}],returnDescription:` <p>Path where the data has been saved.</p> `,returnType:` <p><code>str</code></p> `}}),pe=new b({props:{name:"class transformers.CsvPipelineDataFormat",anchor:"transformers.CsvPipelineDataFormat",parameters:[{name:"output_path",val:": typing.Optional[str]"},{name:"input_path",val:": typing.Optional[str]"},{name:"column",val:": typing.Optional[str]"},{name:"overwrite",val:" = 
False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L517",parametersDescription:[{anchor:"transformers.CsvPipelineDataFormat.output_path",description:"<strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.",name:"output_path"},{anchor:"transformers.CsvPipelineDataFormat.input_path",description:"<strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.",name:"input_path"},{anchor:"transformers.CsvPipelineDataFormat.column",description:"<strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.",name:"column"},{anchor:"transformers.CsvPipelineDataFormat.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.`,name:"overwrite"}]}}),le=new b({props:{name:"save",anchor:"transformers.CsvPipelineDataFormat.save",parameters:[{name:"data",val:": typing.List[dict]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L547",parametersDescription:[{anchor:"transformers.CsvPipelineDataFormat.save.data",description:"<strong>data</strong> (<code>List[dict]</code>) &#x2014; The data to store.",name:"data"}]}}),ce=new b({props:{name:"class transformers.JsonPipelineDataFormat",anchor:"transformers.JsonPipelineDataFormat",parameters:[{name:"output_path",val:": typing.Optional[str]"},{name:"input_path",val:": typing.Optional[str]"},{name:"column",val:": typing.Optional[str]"},{name:"overwrite",val:" = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L561",parametersDescription:[{anchor:"transformers.JsonPipelineDataFormat.output_path",description:"<strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing 
data.",name:"output_path"},{anchor:"transformers.JsonPipelineDataFormat.input_path",description:"<strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.",name:"input_path"},{anchor:"transformers.JsonPipelineDataFormat.column",description:"<strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.",name:"column"},{anchor:"transformers.JsonPipelineDataFormat.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.`,name:"overwrite"}]}}),de=new b({props:{name:"save",anchor:"transformers.JsonPipelineDataFormat.save",parameters:[{name:"data",val:": dict"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L592",parametersDescription:[{anchor:"transformers.JsonPipelineDataFormat.save.data",description:"<strong>data</strong> (<code>dict</code>) &#x2014; The data to store.",name:"data"}]}}),fe=new b({props:{name:"class transformers.PipedPipelineDataFormat",anchor:"transformers.PipedPipelineDataFormat",parameters:[{name:"output_path",val:": typing.Optional[str]"},{name:"input_path",val:": typing.Optional[str]"},{name:"column",val:": typing.Optional[str]"},{name:"overwrite",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L603",parametersDescription:[{anchor:"transformers.PipedPipelineDataFormat.output_path",description:"<strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.",name:"output_path"},{anchor:"transformers.PipedPipelineDataFormat.input_path",description:"<strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.",name:"input_path"},{anchor:"transformers.PipedPipelineDataFormat.column",description:"<strong>column</strong> 
(<code>str</code>, <em>optional</em>) &#x2014; The column to read.",name:"column"},{anchor:"transformers.PipedPipelineDataFormat.overwrite",description:`<strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.`,name:"overwrite"}]}}),he=new b({props:{name:"save",anchor:"transformers.PipedPipelineDataFormat.save",parameters:[{name:"data",val:": dict"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L633",parametersDescription:[{anchor:"transformers.PipedPipelineDataFormat.save.data",description:"<strong>data</strong> (<code>dict</code>) &#x2014; The data to store.",name:"data"}]}}),ue=new Vt({}),ge=new b({props:{name:"class transformers.pipelines.PipelineException",anchor:"transformers.pipelines.PipelineException",parameters:[{name:"task",val:": str"},{name:"model",val:": str"},{name:"reason",val:": str"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L377",parametersDescription:[{anchor:"transformers.pipelines.PipelineException.task",description:"<strong>task</strong> (<code>str</code>) &#x2014; The task of the pipeline.",name:"task"},{anchor:"transformers.pipelines.PipelineException.model",description:"<strong>model</strong> (<code>str</code>) &#x2014; The model used by the pipeline.",name:"model"},{anchor:"transformers.pipelines.PipelineException.reason",description:"<strong>reason</strong> (<code>str</code>) &#x2014; The error message to display.",name:"reason"}]}}),{c(){S=a("meta"),ve=p(),D=a("h1"),y=a("a"),Oe=a("span"),h(Z.$$.fragment),Nt=p(),Ce=a("span"),Wt=s("Utilities for pipelines"),nt=p(),_e=a("p"),Jt=s("This page lists all the utility functions the library provides for pipelines."),st=p(),$e=a("p"),qt=s("Most of those are only useful if you are studying the code of the models in the 
library."),it=p(),A=a("h2"),N=a("a"),Ie=a("span"),h(G.$$.fragment),jt=p(),Te=a("span"),Ut=s("Argument handling"),pt=p(),k=a("div"),h(K.$$.fragment),Qt=p(),X=a("p"),Bt=s("Base interface for handling arguments for each "),Pe=a("a"),Rt=s("Pipeline"),Mt=s("."),lt=p(),L=a("div"),h(Y.$$.fragment),zt=p(),Ve=a("p"),Zt=s(`Handles arguments for zero-shot for text classification by turning each possible label into an NLI premise/hypothesis pair.`),mt=p(),E=a("div"),h(ee.$$.fragment),Gt=p(),be=a("p"),Kt=s(`QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to internal `),He=a("code"),Xt=s("SquadExample"),Yt=p(),te=a("p"),er=s("QuestionAnsweringArgumentHandler manages all the possible to create a "),Ne=a("code"),tr=s("SquadExample"),rr=s(`from the command-line supplied arguments.`),ct=p(),O=a("h2"),W=a("a"),We=a("span"),h(re.$$.fragment),ar=p(),Je=a("span"),or=s("Data format"),dt=p(),f=a("div"),h(ae.$$.fragment),nr=p(),qe=a("p"),sr=s(`Base class for all the pipeline supported data format both for reading and writing. 
Supported data formats currently includes:`),ir=p(),C=a("ul"),je=a("li"),pr=s("JSON"),lr=p(),Ue=a("li"),mr=s("CSV"),cr=p(),Qe=a("li"),dr=s("stdin/stdout (pipe)"),fr=p(),J=a("p"),Be=a("code"),hr=s("PipelineDataFormat"),ur=s(` also includes some utilities to work with multi-columns like mapping from datasets columns to pipelines keyword arguments through the `),Re=a("code"),gr=s("dataset_kwarg_1=dataset_column_1"),vr=s(" format."),_r=p(),q=a("div"),h(oe.$$.fragment),$r=p(),I=a("p"),Pr=s("Creates an instance of the right subclass of "),we=a("a"),br=s("PipelineDataFormat"),wr=s(" depending on "),Me=a("code"),Dr=s("format"),yr=s("."),Er=p(),j=a("div"),h(ne.$$.fragment),Fr=p(),se=a("p"),xr=s("Save the provided data object with the representation for the current "),De=a("a"),Sr=s("PipelineDataFormat"),Ar=s("."),kr=p(),U=a("div"),h(ie.$$.fragment),Lr=p(),ze=a("p"),Or=s("Save the provided data object as a pickle-formatted binary data on the disk."),ft=p(),F=a("div"),h(pe.$$.fragment),Cr=p(),Ze=a("p"),Ir=s("Support for pipelines using CSV data format."),Tr=p(),Q=a("div"),h(le.$$.fragment),Vr=p(),me=a("p"),Hr=s("Save the provided data object with the representation for the current "),ye=a("a"),Nr=s("PipelineDataFormat"),Wr=s("."),ht=p(),x=a("div"),h(ce.$$.fragment),Jr=p(),Ge=a("p"),qr=s("Support for pipelines using JSON file format."),jr=p(),B=a("div"),h(de.$$.fragment),Ur=p(),Ke=a("p"),Qr=s("Save the provided data object in a json file."),ut=p(),w=a("div"),h(fe.$$.fragment),Br=p(),Xe=a("p"),Rr=s("Read data from piped input to the python process. 
For multi columns data, columns should separated by"),Mr=p(),Ye=a("p"),zr=s("If columns are provided, then the output will be a dictionary with {column_x: value_x}"),Zr=p(),R=a("div"),h(he.$$.fragment),Gr=p(),et=a("p"),Kr=s("Print the data."),gt=p(),T=a("h2"),M=a("a"),tt=a("span"),h(ue.$$.fragment),Xr=p(),rt=a("span"),Yr=s("Utilities"),vt=p(),V=a("div"),h(ge.$$.fragment),ea=p(),H=a("p"),ta=s("Raised by a "),Ee=a("a"),ra=s("Pipeline"),aa=s(" when handling "),at=a("strong"),oa=s("call"),na=s("."),this.h()},l(t){const c=Ma('[data-svelte="svelte-1phssyn"]',document.head);S=o(c,"META",{name:!0,content:!0}),c.forEach(r),ve=l(t),D=o(t,"H1",{class:!0});var $t=n(D);y=o($t,"A",{id:!0,class:!0,href:!0});var ia=n(y);Oe=o(ia,"SPAN",{});var pa=n(Oe);u(Z.$$.fragment,pa),pa.forEach(r),ia.forEach(r),Nt=l($t),Ce=o($t,"SPAN",{});var la=n(Ce);Wt=i(la,"Utilities for pipelines"),la.forEach(r),$t.forEach(r),nt=l(t),_e=o(t,"P",{});var ma=n(_e);Jt=i(ma,"This page lists all the utility functions the library provides for pipelines."),ma.forEach(r),st=l(t),$e=o(t,"P",{});var ca=n($e);qt=i(ca,"Most of those are only useful if you are studying the code of the models in the library."),ca.forEach(r),it=l(t),A=o(t,"H2",{class:!0});var Pt=n(A);N=o(Pt,"A",{id:!0,class:!0,href:!0});var da=n(N);Ie=o(da,"SPAN",{});var fa=n(Ie);u(G.$$.fragment,fa),fa.forEach(r),da.forEach(r),jt=l(Pt),Te=o(Pt,"SPAN",{});var ha=n(Te);Ut=i(ha,"Argument handling"),ha.forEach(r),Pt.forEach(r),pt=l(t),k=o(t,"DIV",{class:!0});var bt=n(k);u(K.$$.fragment,bt),Qt=l(bt),X=o(bt,"P",{});var wt=n(X);Bt=i(wt,"Base interface for handling arguments for each "),Pe=o(wt,"A",{href:!0});var ua=n(Pe);Rt=i(ua,"Pipeline"),ua.forEach(r),Mt=i(wt,"."),wt.forEach(r),bt.forEach(r),lt=l(t),L=o(t,"DIV",{class:!0});var Dt=n(L);u(Y.$$.fragment,Dt),zt=l(Dt),Ve=o(Dt,"P",{});var ga=n(Ve);Zt=i(ga,`Handles arguments for zero-shot for text classification by turning each possible label into an NLI premise/hypothesis 
pair.`),ga.forEach(r),Dt.forEach(r),mt=l(t),E=o(t,"DIV",{class:!0});var Fe=n(E);u(ee.$$.fragment,Fe),Gt=l(Fe),be=o(Fe,"P",{});var sa=n(be);Kt=i(sa,`QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to internal `),He=o(sa,"CODE",{});var va=n(He);Xt=i(va,"SquadExample"),va.forEach(r),sa.forEach(r),Yt=l(Fe),te=o(Fe,"P",{});var yt=n(te);er=i(yt,"QuestionAnsweringArgumentHandler manages all the possible to create a "),Ne=o(yt,"CODE",{});var _a=n(Ne);tr=i(_a,"SquadExample"),_a.forEach(r),rr=i(yt,`from the command-line supplied arguments.`),yt.forEach(r),Fe.forEach(r),ct=l(t),O=o(t,"H2",{class:!0});var Et=n(O);W=o(Et,"A",{id:!0,class:!0,href:!0});var $a=n(W);We=o($a,"SPAN",{});var Pa=n(We);u(re.$$.fragment,Pa),Pa.forEach(r),$a.forEach(r),ar=l(Et),Je=o(Et,"SPAN",{});var ba=n(Je);or=i(ba,"Data format"),ba.forEach(r),Et.forEach(r),dt=l(t),f=o(t,"DIV",{class:!0});var P=n(f);u(ae.$$.fragment,P),nr=l(P),qe=o(P,"P",{});var wa=n(qe);sr=i(wa,`Base class for all the pipeline supported data format both for reading and writing. 
Supported data formats currently includes:`),wa.forEach(r),ir=l(P),C=o(P,"UL",{});var xe=n(C);je=o(xe,"LI",{});var Da=n(je);pr=i(Da,"JSON"),Da.forEach(r),lr=l(xe),Ue=o(xe,"LI",{});var ya=n(Ue);mr=i(ya,"CSV"),ya.forEach(r),cr=l(xe),Qe=o(xe,"LI",{});var Ea=n(Qe);dr=i(Ea,"stdin/stdout (pipe)"),Ea.forEach(r),xe.forEach(r),fr=l(P),J=o(P,"P",{});var ot=n(J);Be=o(ot,"CODE",{});var Fa=n(Be);hr=i(Fa,"PipelineDataFormat"),Fa.forEach(r),ur=i(ot,` also includes some utilities to work with multi-columns like mapping from datasets columns to pipelines keyword arguments through the `),Re=o(ot,"CODE",{});var xa=n(Re);gr=i(xa,"dataset_kwarg_1=dataset_column_1"),xa.forEach(r),vr=i(ot," format."),ot.forEach(r),_r=l(P),q=o(P,"DIV",{class:!0});var Ft=n(q);u(oe.$$.fragment,Ft),$r=l(Ft),I=o(Ft,"P",{});var Se=n(I);Pr=i(Se,"Creates an instance of the right subclass of "),we=o(Se,"A",{href:!0});var Sa=n(we);br=i(Sa,"PipelineDataFormat"),Sa.forEach(r),wr=i(Se," depending on "),Me=o(Se,"CODE",{});var Aa=n(Me);Dr=i(Aa,"format"),Aa.forEach(r),yr=i(Se,"."),Se.forEach(r),Ft.forEach(r),Er=l(P),j=o(P,"DIV",{class:!0});var xt=n(j);u(ne.$$.fragment,xt),Fr=l(xt),se=o(xt,"P",{});var St=n(se);xr=i(St,"Save the provided data object with the representation for the current "),De=o(St,"A",{href:!0});var ka=n(De);Sr=i(ka,"PipelineDataFormat"),ka.forEach(r),Ar=i(St,"."),St.forEach(r),xt.forEach(r),kr=l(P),U=o(P,"DIV",{class:!0});var At=n(U);u(ie.$$.fragment,At),Lr=l(At),ze=o(At,"P",{});var La=n(ze);Or=i(La,"Save the provided data object as a pickle-formatted binary data on the disk."),La.forEach(r),At.forEach(r),P.forEach(r),ft=l(t),F=o(t,"DIV",{class:!0});var Ae=n(F);u(pe.$$.fragment,Ae),Cr=l(Ae),Ze=o(Ae,"P",{});var Oa=n(Ze);Ir=i(Oa,"Support for pipelines using CSV data format."),Oa.forEach(r),Tr=l(Ae),Q=o(Ae,"DIV",{class:!0});var kt=n(Q);u(le.$$.fragment,kt),Vr=l(kt),me=o(kt,"P",{});var Lt=n(me);Hr=i(Lt,"Save the provided data object with the representation for the current "),ye=o(Lt,"A",{href:!0});var 
Ca=n(ye);Nr=i(Ca,"PipelineDataFormat"),Ca.forEach(r),Wr=i(Lt,"."),Lt.forEach(r),kt.forEach(r),Ae.forEach(r),ht=l(t),x=o(t,"DIV",{class:!0});var ke=n(x);u(ce.$$.fragment,ke),Jr=l(ke),Ge=o(ke,"P",{});var Ia=n(Ge);qr=i(Ia,"Support for pipelines using JSON file format."),Ia.forEach(r),jr=l(ke),B=o(ke,"DIV",{class:!0});var Ot=n(B);u(de.$$.fragment,Ot),Ur=l(Ot),Ke=o(Ot,"P",{});var Ta=n(Ke);Qr=i(Ta,"Save the provided data object in a json file."),Ta.forEach(r),Ot.forEach(r),ke.forEach(r),ut=l(t),w=o(t,"DIV",{class:!0});var z=n(w);u(fe.$$.fragment,z),Br=l(z),Xe=o(z,"P",{});var Va=n(Xe);Rr=i(Va,"Read data from piped input to the python process. For multi columns data, columns should separated by"),Va.forEach(r),Mr=l(z),Ye=o(z,"P",{});var Ha=n(Ye);zr=i(Ha,"If columns are provided, then the output will be a dictionary with {column_x: value_x}"),Ha.forEach(r),Zr=l(z),R=o(z,"DIV",{class:!0});var Ct=n(R);u(he.$$.fragment,Ct),Gr=l(Ct),et=o(Ct,"P",{});var Na=n(et);Kr=i(Na,"Print the data."),Na.forEach(r),Ct.forEach(r),z.forEach(r),gt=l(t),T=o(t,"H2",{class:!0});var It=n(T);M=o(It,"A",{id:!0,class:!0,href:!0});var Wa=n(M);tt=o(Wa,"SPAN",{});var Ja=n(tt);u(ue.$$.fragment,Ja),Ja.forEach(r),Wa.forEach(r),Xr=l(It),rt=o(It,"SPAN",{});var qa=n(rt);Yr=i(qa,"Utilities"),qa.forEach(r),It.forEach(r),vt=l(t),V=o(t,"DIV",{class:!0});var Tt=n(V);u(ge.$$.fragment,Tt),ea=l(Tt),H=o(Tt,"P",{});var Le=n(H);ta=i(Le,"Raised by a "),Ee=o(Le,"A",{href:!0});var ja=n(Ee);ra=i(ja,"Pipeline"),ja.forEach(r),aa=i(Le," when handling "),at=o(Le,"STRONG",{});var Ua=n(at);oa=i(Ua,"call"),Ua.forEach(r),na=i(Le,"."),Le.forEach(r),Tt.forEach(r),this.h()},h(){m(S,"name","hf:doc:metadata"),m(S,"content",JSON.stringify(Ga)),m(y,"id","utilities-for-pipelines"),m(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(y,"href","#utilities-for-pipelines"),m(D,"class","relative 
group"),m(N,"id","transformers.pipelines.ArgumentHandler"),m(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(N,"href","#transformers.pipelines.ArgumentHandler"),m(A,"class","relative group"),m(Pe,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Pipeline"),m(k,"class","docstring"),m(L,"class","docstring"),m(E,"class","docstring"),m(W,"id","transformers.PipelineDataFormat"),m(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(W,"href","#transformers.PipelineDataFormat"),m(O,"class","relative group"),m(we,"href","/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.PipelineDataFormat"),m(q,"class","docstring"),m(De,"href","/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.PipelineDataFormat"),m(j,"class","docstring"),m(U,"class","docstring"),m(f,"class","docstring"),m(ye,"href","/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.PipelineDataFormat"),m(Q,"class","docstring"),m(F,"class","docstring"),m(B,"class","docstring"),m(x,"class","docstring"),m(R,"class","docstring"),m(w,"class","docstring"),m(M,"id","transformers.pipelines.PipelineException"),m(M,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),m(M,"href","#transformers.pipelines.PipelineException"),m(T,"class","relative 
group"),m(Ee,"href","/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Pipeline"),m(V,"class","docstring")},m(t,c){e(document.head,S),d(t,ve,c),d(t,D,c),e(D,y),e(y,Oe),g(Z,Oe,null),e(D,Nt),e(D,Ce),e(Ce,Wt),d(t,nt,c),d(t,_e,c),e(_e,Jt),d(t,st,c),d(t,$e,c),e($e,qt),d(t,it,c),d(t,A,c),e(A,N),e(N,Ie),g(G,Ie,null),e(A,jt),e(A,Te),e(Te,Ut),d(t,pt,c),d(t,k,c),g(K,k,null),e(k,Qt),e(k,X),e(X,Bt),e(X,Pe),e(Pe,Rt),e(X,Mt),d(t,lt,c),d(t,L,c),g(Y,L,null),e(L,zt),e(L,Ve),e(Ve,Zt),d(t,mt,c),d(t,E,c),g(ee,E,null),e(E,Gt),e(E,be),e(be,Kt),e(be,He),e(He,Xt),e(E,Yt),e(E,te),e(te,er),e(te,Ne),e(Ne,tr),e(te,rr),d(t,ct,c),d(t,O,c),e(O,W),e(W,We),g(re,We,null),e(O,ar),e(O,Je),e(Je,or),d(t,dt,c),d(t,f,c),g(ae,f,null),e(f,nr),e(f,qe),e(qe,sr),e(f,ir),e(f,C),e(C,je),e(je,pr),e(C,lr),e(C,Ue),e(Ue,mr),e(C,cr),e(C,Qe),e(Qe,dr),e(f,fr),e(f,J),e(J,Be),e(Be,hr),e(J,ur),e(J,Re),e(Re,gr),e(J,vr),e(f,_r),e(f,q),g(oe,q,null),e(q,$r),e(q,I),e(I,Pr),e(I,we),e(we,br),e(I,wr),e(I,Me),e(Me,Dr),e(I,yr),e(f,Er),e(f,j),g(ne,j,null),e(j,Fr),e(j,se),e(se,xr),e(se,De),e(De,Sr),e(se,Ar),e(f,kr),e(f,U),g(ie,U,null),e(U,Lr),e(U,ze),e(ze,Or),d(t,ft,c),d(t,F,c),g(pe,F,null),e(F,Cr),e(F,Ze),e(Ze,Ir),e(F,Tr),e(F,Q),g(le,Q,null),e(Q,Vr),e(Q,me),e(me,Hr),e(me,ye),e(ye,Nr),e(me,Wr),d(t,ht,c),d(t,x,c),g(ce,x,null),e(x,Jr),e(x,Ge),e(Ge,qr),e(x,jr),e(x,B),g(de,B,null),e(B,Ur),e(B,Ke),e(Ke,Qr),d(t,ut,c),d(t,w,c),g(fe,w,null),e(w,Br),e(w,Xe),e(Xe,Rr),e(w,Mr),e(w,Ye),e(Ye,zr),e(w,Zr),e(w,R),g(he,R,null),e(R,Gr),e(R,et),e(et,Kr),d(t,gt,c),d(t,T,c),e(T,M),e(M,tt),g(ue,tt,null),e(T,Xr),e(T,rt),e(rt,Yr),d(t,vt,c),d(t,V,c),g(ge,V,null),e(V,ea),e(V,H),e(H,ta),e(H,Ee),e(Ee,ra),e(H,aa),e(H,at),e(at,oa),e(H,na),_t=!0},p:za,i(t){_t||(v(Z.$$.fragment,t),v(G.$$.fragment,t),v(K.$$.fragment,t),v(Y.$$.fragment,t),v(ee.$$.fragment,t),v(re.$$.fragment,t),v(ae.$$.fragment,t),v(oe.$$.fragment,t),v(ne.$$.fragment,t),v(ie.$$.fragment,t),v(pe.$$.fragment,t),v(le.$$.fragment,t),v(ce.$$.fragment,t),v(de.$$.fragment,t),v(fe.$$.fragment
,t),v(he.$$.fragment,t),v(ue.$$.fragment,t),v(ge.$$.fragment,t),_t=!0)},o(t){_(Z.$$.fragment,t),_(G.$$.fragment,t),_(K.$$.fragment,t),_(Y.$$.fragment,t),_(ee.$$.fragment,t),_(re.$$.fragment,t),_(ae.$$.fragment,t),_(oe.$$.fragment,t),_(ne.$$.fragment,t),_(ie.$$.fragment,t),_(pe.$$.fragment,t),_(le.$$.fragment,t),_(ce.$$.fragment,t),_(de.$$.fragment,t),_(fe.$$.fragment,t),_(he.$$.fragment,t),_(ue.$$.fragment,t),_(ge.$$.fragment,t),_t=!1},d(t){r(S),t&&r(ve),t&&r(D),$(Z),t&&r(nt),t&&r(_e),t&&r(st),t&&r($e),t&&r(it),t&&r(A),$(G),t&&r(pt),t&&r(k),$(K),t&&r(lt),t&&r(L),$(Y),t&&r(mt),t&&r(E),$(ee),t&&r(ct),t&&r(O),$(re),t&&r(dt),t&&r(f),$(ae),$(oe),$(ne),$(ie),t&&r(ft),t&&r(F),$(pe),$(le),t&&r(ht),t&&r(x),$(ce),$(de),t&&r(ut),t&&r(w),$(fe),$(he),t&&r(gt),t&&r(T),$(ue),t&&r(vt),t&&r(V),$(ge)}}}const Ga={local:"utilities-for-pipelines",sections:[{local:"transformers.pipelines.ArgumentHandler",title:"Argument handling"},{local:"transformers.PipelineDataFormat",title:"Data format"},{local:"transformers.pipelines.PipelineException",title:"Utilities"}],title:"Utilities for pipelines"};function Ka(Ht,S,ve){let{fw:D}=S;return Ht.$$set=y=>{"fw"in y&&ve(0,D=y.fw)},[D]}class to extends Qa{constructor(S){super();Ba(this,S,Ka,Za,Ra,{fw:0})}}export{to as default,Ga as metadata};
419
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/internal/trainer_utils.mdx-b7a1b196.js
import{S as Oi,i as Ai,s as Ci,e as s,k as i,w as b,t as o,M as Li,c as r,d as t,m as c,a as n,x as v,h as l,b as d,F as e,g as m,y as _,L as Ii,q as w,o as $,B as y}from"../../chunks/vendor-4833417e.js";import{D as j}from"../../chunks/Docstring-4f315ed9.js";import{C as ka}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as st}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function Ui(vs){let O,Ve,P,T,rt,ne,_s,nt,ws,Ta,Y,$s,Me,ys,Es,xa,Re,Ds,Oa,L,W,ot,oe,js,lt,Ps,Aa,I,le,ks,it,Ts,Ca,U,ie,xs,ct,Os,La,z,ce,As,D,Cs,pt,Ls,Is,ht,Us,zs,dt,Ss,Hs,ut,Ns,Gs,Ia,S,pe,Vs,ft,Ms,Ua,H,J,mt,he,Rs,gt,Fs,za,N,de,qs,bt,Bs,Sa,G,Q,vt,ue,Ks,_t,Ys,Ha,u,fe,Ws,wt,Js,Qs,$t,Xs,Zs,yt,Et,er,tr,Dt,ar,sr,V,Fe,rr,jt,nr,or,qe,lr,Pt,ir,cr,Be,pr,kt,hr,dr,Tt,ur,fr,M,Ke,mr,xt,gr,br,Ye,vr,Ot,_r,wr,We,$r,At,yr,Er,Ct,Dr,jr,Lt,It,Pr,kr,Ut,Tr,xr,zt,St,Or,Ar,Ht,Cr,Lr,X,me,Ir,ge,Ur,Nt,zr,Sr,Hr,Z,be,Nr,Gt,Gr,Na,R,ee,Vt,ve,Vr,Mt,Mr,Ga,E,_e,Rr,we,Fr,Rt,qr,Br,Kr,$e,Yr,Ft,Wr,Jr,Qr,A,ye,Xr,qt,Zr,en,Ee,tn,Bt,an,sn,rn,te,De,nn,je,on,Kt,ln,cn,pn,ae,Pe,hn,ke,dn,Yt,un,fn,Va,F,se,Wt,Te,mn,Jt,gn,Ma,p,xe,bn,q,vn,Qt,_n,wn,Xt,$n,yn,En,Zt,Dn,jn,Oe,ea,Pn,kn,ta,Tn,xn,aa,On,An,sa,Cn,Ln,Ae,In,x,Un,ra,zn,Sn,na,Hn,Nn,oa,Gn,Vn,Mn,Ce,Le,Rn,la,Fn,qn,Bn,ia,Kn,Yn,Ie,Wn,ca,Jn,Qn,Xn,Ue,Zn,B,eo,pa,to,ao,ha,so,ro,no,da,oo,lo,ze,io,ua,co,po,ho,fa,uo,fo,Se,mo,ma,go,bo,ga,vo,_o,ba,wo,$o,He,yo,va,Eo,Do,jo,Ne,Po,_a,ko,To,wa,xo,Oo,$a,Ao,Co,ya,Lo,Io,Ge,Uo,Ea,zo,So,Je,Da,Ho,No,Go,K,Vo,ja,Mo,Ro,Pa,Fo,qo,Ra;return ne=new st({}),oe=new st({}),le=new j({props:{name:"class transformers.EvalPrediction",anchor:"transformers.EvalPrediction",parameters:[{name:"predictions",val:": typing.Union[numpy.ndarray, typing.Tuple[numpy.ndarray]]"},{name:"label_ids",val:": typing.Union[numpy.ndarray, 
typing.Tuple[numpy.ndarray]]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_utils.py#L67",parametersDescription:[{anchor:"transformers.EvalPrediction.predictions",description:"<strong>predictions</strong> (<code>np.ndarray</code>) &#x2014; Predictions of the model.",name:"predictions"},{anchor:"transformers.EvalPrediction.label_ids",description:"<strong>label_ids</strong> (<code>np.ndarray</code>) &#x2014; Targets to be matched.",name:"label_ids"}]}}),ie=new j({props:{name:"class transformers.IntervalStrategy",anchor:"transformers.IntervalStrategy",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_utils.py#L115"}}),ce=new j({props:{name:"transformers.set_seed",anchor:"transformers.set_seed",parameters:[{name:"seed",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_utils.py#L50",parametersDescription:[{anchor:"transformers.set_seed.seed",description:"<strong>seed</strong> (<code>int</code>) &#x2014; The seed to set.",name:"seed"}]}}),pe=new j({props:{name:"transformers.torch_distributed_zero_first",anchor:"transformers.torch_distributed_zero_first",parameters:[{name:"local_rank",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L212",parametersDescription:[{anchor:"transformers.torch_distributed_zero_first.local_rank",description:"<strong>local_rank</strong> (<code>int</code>) &#x2014; The rank of the local process.",name:"local_rank"}]}}),he=new st({}),de=new j({props:{name:"class 
transformers.trainer_callback.CallbackHandler",anchor:"transformers.trainer_callback.CallbackHandler",parameters:[{name:"callbacks",val:""},{name:"model",val:""},{name:"tokenizer",val:""},{name:"optimizer",val:""},{name:"lr_scheduler",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L284"}}),ue=new st({}),fe=new j({props:{name:"class transformers.trainer_pt_utils.DistributedTensorGatherer",anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer",parameters:[{name:"world_size",val:""},{name:"num_samples",val:""},{name:"make_multiple_of",val:" = None"},{name:"padding_index",val:" = -100"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L338",parametersDescription:[{anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.world_size",description:`<strong>world_size</strong> (<code>int</code>) &#x2014; The number of processes used in the distributed training.`,name:"world_size"},{anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.num_samples",description:`<strong>num_samples</strong> (<code>int</code>) &#x2014; The number of samples in our dataset.`,name:"num_samples"},{anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.make_multiple_of",description:`<strong>make_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument (by adding samples).`,name:"make_multiple_of"},{anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.padding_index",description:`<strong>padding_index</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The padding index to use if the arrays don&#x2019;t all have the same sequence length.`,name:"padding_index"}]}}),me=new 
j({props:{name:"add_arrays",anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.add_arrays",parameters:[{name:"arrays",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L399"}}),be=new j({props:{name:"finalize",anchor:"transformers.trainer_pt_utils.DistributedTensorGatherer.finalize",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L435"}}),ve=new st({}),_e=new j({props:{name:"class transformers.HfArgumentParser",anchor:"transformers.HfArgumentParser",parameters:[{name:"dataclass_types",val:": typing.Union[DataClassType, typing.Iterable[DataClassType]]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/hf_argparser.py#L44"}}),ye=new j({props:{name:"parse_args_into_dataclasses",anchor:"transformers.HfArgumentParser.parse_args_into_dataclasses",parameters:[{name:"args",val:" = None"},{name:"return_remaining_strings",val:" = False"},{name:"look_for_args_file",val:" = True"},{name:"args_filename",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/hf_argparser.py#L161",returnDescription:` <ul> <li>the dataclass instances in the same order as they were passed to the initializer.abspath</li> <li>if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser after initialization.</li> <li>The potential list of remaining argument strings. 
(same as argparse.ArgumentParser.parse_known_args)</li> </ul> `,returnType:` <p>Tuple consisting of</p> `}}),De=new j({props:{name:"parse_dict",anchor:"transformers.HfArgumentParser.parse_dict",parameters:[{name:"args",val:": dict"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/hf_argparser.py#L234"}}),Pe=new j({props:{name:"parse_json_file",anchor:"transformers.HfArgumentParser.parse_json_file",parameters:[{name:"json_file",val:": str"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/hf_argparser.py#L220"}}),Te=new st({}),xe=new j({props:{name:"class transformers.debug_utils.DebugUnderflowOverflow",anchor:"transformers.debug_utils.DebugUnderflowOverflow",parameters:[{name:"model",val:""},{name:"max_frames_to_save",val:" = 21"},{name:"trace_batch_nums",val:" = []"},{name:"abort_after_batch_num",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/debug_utils.py#L28",parametersDescription:[{anchor:"transformers.debug_utils.DebugUnderflowOverflow.model",description:`<strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to debug.`,name:"model"},{anchor:"transformers.debug_utils.DebugUnderflowOverflow.max_frames_to_save",description:`<strong>max_frames_to_save</strong> (<code>int</code>, <em>optional</em>, defaults to 21) &#x2014; How many frames back to record`,name:"max_frames_to_save"},{anchor:"transformers.debug_utils.DebugUnderflowOverflow.trace_batch_nums(List[int],",description:`<strong>trace_batch_nums(<code>List[int]</code>,</strong> <em>optional</em>, defaults to <code>[]</code>) &#x2014; Which batch numbers to trace (turns detection off)`,name:"trace_batch_nums(List[int],"},{anchor:"transformers.debug_utils.DebugUnderflowOverflow.abort_after_batch_num",description:"<strong>abort_after_batch_num</strong> (`int&#x201C;, <em>optional</em>) &#x2014;\nWhether to abort after a certain batch number has 
finished",name:"abort_after_batch_num"}]}}),Ae=new ka({props:{code:"debug_overflow = DebugUnderflowOverflow(model)",highlighted:"debug_overflow = DebugUnderflowOverflow(model)"}}),Ue=new ka({props:{code:`Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata [...] encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output`,highlighted:`<span class="hljs-attribute">Detected</span> inf/nan during batch_number=<span class="hljs-number">0</span> <span class="hljs-attribute">Last</span> <span class="hljs-number">21</span> forward frames: <span class="hljs-attribute">abs</span> min abs max metadata<span class="hljs-meta"> [...]</span> <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wi_0 Linear <span class="hljs-attribute">2</span>.<span class="hljs-number">17</span>e-<span class="hljs-number">07</span> <span class="hljs-number">4</span>.<span class="hljs-number">50</span>e+<span class="hljs-number">00</span> weight <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">2</span>.<span class="hljs-number">68</span>e-<span class="hljs-number">06</span> <span class="hljs-number">3</span>.<span 
class="hljs-number">70</span>e+<span class="hljs-number">01</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wi_1 Linear <span class="hljs-attribute">8</span>.<span class="hljs-number">08</span>e-<span class="hljs-number">07</span> <span class="hljs-number">2</span>.<span class="hljs-number">66</span>e+<span class="hljs-number">01</span> weight <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">1</span>.<span class="hljs-number">27</span>e-<span class="hljs-number">04</span> <span class="hljs-number">2</span>.<span class="hljs-number">37</span>e+<span class="hljs-number">02</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wo Linear <span class="hljs-attribute">1</span>.<span class="hljs-number">01</span>e-<span class="hljs-number">06</span> <span class="hljs-number">6</span>.<span class="hljs-number">44</span>e+<span class="hljs-number">00</span> weight <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> <span class="hljs-number">9</span>.<span class="hljs-number">74</span>e+<span class="hljs-number">03</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense T5DenseGatedGeluDense <span 
class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.dropout Dropout <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> inf output`}}),Se=new ka({props:{code:"debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)",highlighted:'debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=<span class="hljs-number">100</span>)'}}),Ne=new ka({props:{code:"debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])",highlighted:'debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])'}}),Ge=new ka({props:{code:"debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)",highlighted:'debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[<span class="hljs-number">1</span>, <span class="hljs-number">3</span>], abort_after_batch_num=<span class="hljs-number">3</span>)'}}),{c(){O=s("meta"),Ve=i(),P=s("h1"),T=s("a"),rt=s("span"),b(ne.$$.fragment),_s=i(),nt=s("span"),ws=o("Utilities for Trainer"),Ta=i(),Y=s("p"),$s=o("This page lists all the utility 
functions used by "),Me=s("a"),ys=o("Trainer"),Es=o("."),xa=i(),Re=s("p"),Ds=o("Most of those are only useful if you are studying the code of the Trainer in the library."),Oa=i(),L=s("h2"),W=s("a"),ot=s("span"),b(oe.$$.fragment),js=i(),lt=s("span"),Ps=o("Utilities"),Aa=i(),I=s("div"),b(le.$$.fragment),ks=i(),it=s("p"),Ts=o("Evaluation output (always contains labels), to be used to compute metrics."),Ca=i(),U=s("div"),b(ie.$$.fragment),xs=i(),ct=s("p"),Os=o("An enumeration."),La=i(),z=s("div"),b(ce.$$.fragment),As=i(),D=s("p"),Cs=o("Helper function for reproducible behavior to set the seed in "),pt=s("code"),Ls=o("random"),Is=o(", "),ht=s("code"),Us=o("numpy"),zs=o(", "),dt=s("code"),Ss=o("torch"),Hs=o(" and/or "),ut=s("code"),Ns=o("tf"),Gs=o(" (if installed)."),Ia=i(),S=s("div"),b(pe.$$.fragment),Vs=i(),ft=s("p"),Ms=o("Decorator to make all processes in distributed training wait for each local_master to do something."),Ua=i(),H=s("h2"),J=s("a"),mt=s("span"),b(he.$$.fragment),Rs=i(),gt=s("span"),Fs=o("Callbacks internals"),za=i(),N=s("div"),b(de.$$.fragment),qs=i(),bt=s("p"),Bs=o("Internal class that just calls the list of callbacks in order."),Sa=i(),G=s("h2"),Q=s("a"),vt=s("span"),b(ue.$$.fragment),Ks=i(),_t=s("span"),Ys=o("Distributed Evaluation"),Ha=i(),u=s("div"),b(fe.$$.fragment),Ws=i(),wt=s("p"),Js=o("A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks."),Qs=i(),$t=s("p"),Xs=o(`If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every step, our sampler will generate the following indices:`),Zs=i(),yt=s("p"),Et=s("code"),er=o("[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]"),tr=i(),Dt=s("p"),ar=o(`to get something of size a multiple of 3 (so that each process gets the same dataset length). 
Then process 0, 1 and 2 will be responsible of making predictions for the following samples:`),sr=i(),V=s("ul"),Fe=s("li"),rr=o("P0: "),jt=s("code"),nr=o("[0, 1, 2, 3, 4, 5]"),or=i(),qe=s("li"),lr=o("P1: "),Pt=s("code"),ir=o("[6, 7, 8, 9, 10, 11]"),cr=i(),Be=s("li"),pr=o("P2: "),kt=s("code"),hr=o("[12, 13, 14, 15, 0, 1]"),dr=i(),Tt=s("p"),ur=o("The first batch treated on each process will be"),fr=i(),M=s("ul"),Ke=s("li"),mr=o("P0: "),xt=s("code"),gr=o("[0, 1]"),br=i(),Ye=s("li"),vr=o("P1: "),Ot=s("code"),_r=o("[6, 7]"),wr=i(),We=s("li"),$r=o("P2: "),At=s("code"),yr=o("[12, 13]"),Er=i(),Ct=s("p"),Dr=o(`So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to the following indices:`),jr=i(),Lt=s("p"),It=s("code"),Pr=o("[0, 1, 6, 7, 12, 13]"),kr=i(),Ut=s("p"),Tr=o(`If we directly concatenate our results without taking any precautions, the user will then get the predictions for the indices in this order at the end of the prediction loop:`),xr=i(),zt=s("p"),St=s("code"),Or=o("[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]"),Ar=i(),Ht=s("p"),Cr=o("For some reason, that\u2019s not going to roll their boat. 
This class is there to solve that problem."),Lr=i(),X=s("div"),b(me.$$.fragment),Ir=i(),ge=s("p"),Ur=o("Add "),Nt=s("code"),zr=o("arrays"),Sr=o(` to the internal storage, Will initialize the storage to the full size at the first arrays passed so that if we\u2019re bound to get an OOM, it happens at the beginning.`),Hr=i(),Z=s("div"),b(be.$$.fragment),Nr=i(),Gt=s("p"),Gr=o(`Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras to get each process a dataset of the same length).`),Na=i(),R=s("h2"),ee=s("a"),Vt=s("span"),b(ve.$$.fragment),Vr=i(),Mt=s("span"),Mr=o("Distributed Evaluation"),Ga=i(),E=s("div"),b(_e.$$.fragment),Rr=i(),we=s("p"),Fr=o("This subclass of "),Rt=s("code"),qr=o("argparse.ArgumentParser"),Br=o(" uses type hints on dataclasses to generate arguments."),Kr=i(),$e=s("p"),Yr=o(`The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed) arguments to the parser after initialization and you\u2019ll get the output back after parsing as an additional namespace. Optional: To create sub argument groups use the `),Ft=s("code"),Wr=o("_argument_group_name"),Jr=o(" attribute in the dataclass."),Qr=i(),A=s("div"),b(ye.$$.fragment),Xr=i(),qt=s("p"),Zr=o("Parse command-line args into instances of the specified dataclass types."),en=i(),Ee=s("p"),tn=o("This relies on argparse\u2019s "),Bt=s("code"),an=o("ArgumentParser.parse_known_args"),sn=o(`. 
See the doc at: docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args`),rn=i(),te=s("div"),b(De.$$.fragment),nn=i(),je=s("p"),on=o("Alternative helper method that does not use "),Kt=s("code"),ln=o("argparse"),cn=o(` at all, instead uses a dict and populating the dataclass types.`),pn=i(),ae=s("div"),b(Pe.$$.fragment),hn=i(),ke=s("p"),dn=o("Alternative helper method that does not use "),Yt=s("code"),un=o("argparse"),fn=o(` at all, instead loading a json file and populating the dataclass types.`),Va=i(),F=s("h2"),se=s("a"),Wt=s("span"),b(Te.$$.fragment),mn=i(),Jt=s("span"),gn=o("Debug Utilities"),Ma=i(),p=s("div"),b(xe.$$.fragment),bn=i(),q=s("p"),vn=o(`This debug class helps detect and understand where the model starts getting very large or very small, and more importantly `),Qt=s("code"),_n=o("nan"),wn=o(" or "),Xt=s("code"),$n=o("inf"),yn=o(" weight and activation elements."),En=i(),Zt=s("p"),Dn=o("There are 2 working modes:"),jn=i(),Oe=s("ol"),ea=s("li"),Pn=o("Underflow/overflow detection (default)"),kn=i(),ta=s("li"),Tn=o("Specific batch absolute min/max tracing without detection"),xn=i(),aa=s("p"),On=o("Mode 1: Underflow/overflow detection"),An=i(),sa=s("p"),Cn=o("To activate the underflow/overflow detection, initialize the object with the model :"),Ln=i(),b(Ae.$$.fragment),In=i(),x=s("p"),Un=o("then run the training as normal and if "),ra=s("code"),zn=o("nan"),Sn=o(" or "),na=s("code"),Hn=o("inf"),Nn=o(` gets detected in at least one of the weight, input or output elements this module will throw an exception and will print `),oa=s("code"),Gn=o("max_frames_to_save"),Vn=o(` frames that lead to this event, each frame reporting`),Mn=i(),Ce=s("ol"),Le=s("li"),Rn=o("the fully qualified module name plus the class name whose "),la=s("code"),Fn=o("forward"),qn=o(" was run"),Bn=i(),ia=s("li"),Kn=o("the absolute min and max value of all elements for each module weights, and the inputs and output"),Yn=i(),Ie=s("p"),Wn=o("For example, here is the 
header and the last few frames in detection report for "),ca=s("code"),Jn=o("google/mt5-small"),Qn=o(` run in fp16 mixed precision :`),Xn=i(),b(Ue.$$.fragment),Zn=i(),B=s("p"),eo=o("You can see here, that "),pa=s("code"),to=o("T5DenseGatedGeluDense.forward"),ao=o(` resulted in output activations, whose absolute max value was around 62.7K, which is very close to fp16\u2019s top limit of 64K. In the next frame we have `),ha=s("code"),so=o("Dropout"),ro=o(` which renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than 64K, and we get an overlow.`),no=i(),da=s("p"),oo=o(`As you can see it\u2019s the previous frames that we need to look into when the numbers start going into very large for fp16 numbers.`),lo=i(),ze=s("p"),io=o("The tracking is done in a forward hook, which gets invoked immediately after "),ua=s("code"),co=o("forward"),po=o(" has completed."),ho=i(),fa=s("p"),uo=o("By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :"),fo=i(),b(Se.$$.fragment),mo=i(),ma=s("p"),go=o(`To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next section.`),bo=i(),ga=s("p"),vo=o("Mode 2. Specific batch absolute min/max tracing without detection"),_o=i(),ba=s("p"),wo=o("The second work mode is per-batch tracing with the underflow/overflow detection feature turned off."),$o=i(),He=s("p"),yo=o("Let\u2019s say you want to watch the absolute min and max values for all the ingredients of each "),va=s("code"),Eo=o("forward"),Do=o(` call of a given batch, and only do that for batches 1 and 3. Then you instantiate this class as :`),jo=i(),b(Ne.$$.fragment),Po=i(),_a=s("p"),ko=o("And now full batches 1 and 3 will be traced using the same format as explained above. 
Batches are 0-indexed."),To=i(),wa=s("p"),xo=o(`This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward right to that area.`),Oo=i(),$a=s("p"),Ao=o("Early stopping:"),Co=i(),ya=s("p"),Lo=o("You can also specify the batch number after which to stop the training, with :"),Io=i(),b(Ge.$$.fragment),Uo=i(),Ea=s("p"),zo=o("This feature is mainly useful in the tracing mode, but you can use it for any mode."),So=i(),Je=s("p"),Da=s("strong"),Ho=o("Performance"),No=o(":"),Go=i(),K=s("p"),Vo=o("As this module measures absolute "),ja=s("code"),Mo=o("min"),Ro=o("/`"),Pa=s("code"),Fo=o("max"),qo=o(` of each weight of the model on every forward it\u2019ll slow the training down. Therefore remember to turn it off once the debugging needs have been met.`),this.h()},l(a){const f=Li('[data-svelte="svelte-1phssyn"]',document.head);O=r(f,"META",{name:!0,content:!0}),f.forEach(t),Ve=c(a),P=r(a,"H1",{class:!0});var Fa=n(P);T=r(Fa,"A",{id:!0,class:!0,href:!0});var Zo=n(T);rt=r(Zo,"SPAN",{});var el=n(rt);v(ne.$$.fragment,el),el.forEach(t),Zo.forEach(t),_s=c(Fa),nt=r(Fa,"SPAN",{});var tl=n(nt);ws=l(tl,"Utilities for Trainer"),tl.forEach(t),Fa.forEach(t),Ta=c(a),Y=r(a,"P",{});var qa=n(Y);$s=l(qa,"This page lists all the utility functions used by "),Me=r(qa,"A",{href:!0});var al=n(Me);ys=l(al,"Trainer"),al.forEach(t),Es=l(qa,"."),qa.forEach(t),xa=c(a),Re=r(a,"P",{});var sl=n(Re);Ds=l(sl,"Most of those are only useful if you are studying the code of the Trainer in the library."),sl.forEach(t),Oa=c(a),L=r(a,"H2",{class:!0});var Ba=n(L);W=r(Ba,"A",{id:!0,class:!0,href:!0});var rl=n(W);ot=r(rl,"SPAN",{});var nl=n(ot);v(oe.$$.fragment,nl),nl.forEach(t),rl.forEach(t),js=c(Ba),lt=r(Ba,"SPAN",{});var ol=n(lt);Ps=l(ol,"Utilities"),ol.forEach(t),Ba.forEach(t),Aa=c(a),I=r(a,"DIV",{class:!0});var Ka=n(I);v(le.$$.fragment,Ka),ks=c(Ka),it=r(Ka,"P",{});var ll=n(it);Ts=l(ll,"Evaluation output (always contains labels), to be used to compute 
metrics."),ll.forEach(t),Ka.forEach(t),Ca=c(a),U=r(a,"DIV",{class:!0});var Ya=n(U);v(ie.$$.fragment,Ya),xs=c(Ya),ct=r(Ya,"P",{});var il=n(ct);Os=l(il,"An enumeration."),il.forEach(t),Ya.forEach(t),La=c(a),z=r(a,"DIV",{class:!0});var Wa=n(z);v(ce.$$.fragment,Wa),As=c(Wa),D=r(Wa,"P",{});var C=n(D);Cs=l(C,"Helper function for reproducible behavior to set the seed in "),pt=r(C,"CODE",{});var cl=n(pt);Ls=l(cl,"random"),cl.forEach(t),Is=l(C,", "),ht=r(C,"CODE",{});var pl=n(ht);Us=l(pl,"numpy"),pl.forEach(t),zs=l(C,", "),dt=r(C,"CODE",{});var hl=n(dt);Ss=l(hl,"torch"),hl.forEach(t),Hs=l(C," and/or "),ut=r(C,"CODE",{});var dl=n(ut);Ns=l(dl,"tf"),dl.forEach(t),Gs=l(C," (if installed)."),C.forEach(t),Wa.forEach(t),Ia=c(a),S=r(a,"DIV",{class:!0});var Ja=n(S);v(pe.$$.fragment,Ja),Vs=c(Ja),ft=r(Ja,"P",{});var ul=n(ft);Ms=l(ul,"Decorator to make all processes in distributed training wait for each local_master to do something."),ul.forEach(t),Ja.forEach(t),Ua=c(a),H=r(a,"H2",{class:!0});var Qa=n(H);J=r(Qa,"A",{id:!0,class:!0,href:!0});var fl=n(J);mt=r(fl,"SPAN",{});var ml=n(mt);v(he.$$.fragment,ml),ml.forEach(t),fl.forEach(t),Rs=c(Qa),gt=r(Qa,"SPAN",{});var gl=n(gt);Fs=l(gl,"Callbacks internals"),gl.forEach(t),Qa.forEach(t),za=c(a),N=r(a,"DIV",{class:!0});var Xa=n(N);v(de.$$.fragment,Xa),qs=c(Xa),bt=r(Xa,"P",{});var bl=n(bt);Bs=l(bl,"Internal class that just calls the list of callbacks in order."),bl.forEach(t),Xa.forEach(t),Sa=c(a),G=r(a,"H2",{class:!0});var Za=n(G);Q=r(Za,"A",{id:!0,class:!0,href:!0});var vl=n(Q);vt=r(vl,"SPAN",{});var _l=n(vt);v(ue.$$.fragment,_l),_l.forEach(t),vl.forEach(t),Ks=c(Za),_t=r(Za,"SPAN",{});var wl=n(_t);Ys=l(wl,"Distributed Evaluation"),wl.forEach(t),Za.forEach(t),Ha=c(a),u=r(a,"DIV",{class:!0});var g=n(u);v(fe.$$.fragment,g),Ws=c(g),wt=r(g,"P",{});var $l=n(wt);Js=l($l,"A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks."),$l.forEach(t),Qs=c(g),$t=r(g,"P",{});var yl=n($t);Xs=l(yl,`If our 
dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every step, our sampler will generate the following indices:`),yl.forEach(t),Zs=c(g),yt=r(g,"P",{});var El=n(yt);Et=r(El,"CODE",{});var Dl=n(Et);er=l(Dl,"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]"),Dl.forEach(t),El.forEach(t),tr=c(g),Dt=r(g,"P",{});var jl=n(Dt);ar=l(jl,`to get something of size a multiple of 3 (so that each process gets the same dataset length). Then process 0, 1 and 2 will be responsible of making predictions for the following samples:`),jl.forEach(t),sr=c(g),V=r(g,"UL",{});var Qe=n(V);Fe=r(Qe,"LI",{});var Bo=n(Fe);rr=l(Bo,"P0: "),jt=r(Bo,"CODE",{});var Pl=n(jt);nr=l(Pl,"[0, 1, 2, 3, 4, 5]"),Pl.forEach(t),Bo.forEach(t),or=c(Qe),qe=r(Qe,"LI",{});var Ko=n(qe);lr=l(Ko,"P1: "),Pt=r(Ko,"CODE",{});var kl=n(Pt);ir=l(kl,"[6, 7, 8, 9, 10, 11]"),kl.forEach(t),Ko.forEach(t),cr=c(Qe),Be=r(Qe,"LI",{});var Yo=n(Be);pr=l(Yo,"P2: "),kt=r(Yo,"CODE",{});var Tl=n(kt);hr=l(Tl,"[12, 13, 14, 15, 0, 1]"),Tl.forEach(t),Yo.forEach(t),Qe.forEach(t),dr=c(g),Tt=r(g,"P",{});var xl=n(Tt);ur=l(xl,"The first batch treated on each process will be"),xl.forEach(t),fr=c(g),M=r(g,"UL",{});var Xe=n(M);Ke=r(Xe,"LI",{});var Wo=n(Ke);mr=l(Wo,"P0: "),xt=r(Wo,"CODE",{});var Ol=n(xt);gr=l(Ol,"[0, 1]"),Ol.forEach(t),Wo.forEach(t),br=c(Xe),Ye=r(Xe,"LI",{});var Jo=n(Ye);vr=l(Jo,"P1: "),Ot=r(Jo,"CODE",{});var Al=n(Ot);_r=l(Al,"[6, 7]"),Al.forEach(t),Jo.forEach(t),wr=c(Xe),We=r(Xe,"LI",{});var Qo=n(We);$r=l(Qo,"P2: "),At=r(Qo,"CODE",{});var Cl=n(At);yr=l(Cl,"[12, 13]"),Cl.forEach(t),Qo.forEach(t),Xe.forEach(t),Er=c(g),Ct=r(g,"P",{});var Ll=n(Ct);Dr=l(Ll,`So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to the following indices:`),Ll.forEach(t),jr=c(g),Lt=r(g,"P",{});var Il=n(Lt);It=r(Il,"CODE",{});var Ul=n(It);Pr=l(Ul,"[0, 1, 6, 7, 12, 13]"),Ul.forEach(t),Il.forEach(t),kr=c(g),Ut=r(g,"P",{});var zl=n(Ut);Tr=l(zl,`If 
we directly concatenate our results without taking any precautions, the user will then get the predictions for the indices in this order at the end of the prediction loop:`),zl.forEach(t),xr=c(g),zt=r(g,"P",{});var Sl=n(zt);St=r(Sl,"CODE",{});var Hl=n(St);Or=l(Hl,"[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]"),Hl.forEach(t),Sl.forEach(t),Ar=c(g),Ht=r(g,"P",{});var Nl=n(Ht);Cr=l(Nl,"For some reason, that\u2019s not going to roll their boat. This class is there to solve that problem."),Nl.forEach(t),Lr=c(g),X=r(g,"DIV",{class:!0});var es=n(X);v(me.$$.fragment,es),Ir=c(es),ge=r(es,"P",{});var ts=n(ge);Ur=l(ts,"Add "),Nt=r(ts,"CODE",{});var Gl=n(Nt);zr=l(Gl,"arrays"),Gl.forEach(t),Sr=l(ts,` to the internal storage, Will initialize the storage to the full size at the first arrays passed so that if we\u2019re bound to get an OOM, it happens at the beginning.`),ts.forEach(t),es.forEach(t),Hr=c(g),Z=r(g,"DIV",{class:!0});var as=n(Z);v(be.$$.fragment,as),Nr=c(as),Gt=r(as,"P",{});var Vl=n(Gt);Gr=l(Vl,`Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras to get each process a dataset of the same length).`),Vl.forEach(t),as.forEach(t),g.forEach(t),Na=c(a),R=r(a,"H2",{class:!0});var ss=n(R);ee=r(ss,"A",{id:!0,class:!0,href:!0});var Ml=n(ee);Vt=r(Ml,"SPAN",{});var Rl=n(Vt);v(ve.$$.fragment,Rl),Rl.forEach(t),Ml.forEach(t),Vr=c(ss),Mt=r(ss,"SPAN",{});var Fl=n(Mt);Mr=l(Fl,"Distributed Evaluation"),Fl.forEach(t),ss.forEach(t),Ga=c(a),E=r(a,"DIV",{class:!0});var k=n(E);v(_e.$$.fragment,k),Rr=c(k),we=r(k,"P",{});var rs=n(we);Fr=l(rs,"This subclass of "),Rt=r(rs,"CODE",{});var ql=n(Rt);qr=l(ql,"argparse.ArgumentParser"),ql.forEach(t),Br=l(rs," uses type hints on dataclasses to generate arguments."),rs.forEach(t),Kr=c(k),$e=r(k,"P",{});var ns=n($e);Yr=l(ns,`The class is designed to play well with the native argparse. 
In particular, you can add more (non-dataclass backed) arguments to the parser after initialization and you\u2019ll get the output back after parsing as an additional namespace. Optional: To create sub argument groups use the `),Ft=r(ns,"CODE",{});var Bl=n(Ft);Wr=l(Bl,"_argument_group_name"),Bl.forEach(t),Jr=l(ns," attribute in the dataclass."),ns.forEach(t),Qr=c(k),A=r(k,"DIV",{class:!0});var Ze=n(A);v(ye.$$.fragment,Ze),Xr=c(Ze),qt=r(Ze,"P",{});var Kl=n(qt);Zr=l(Kl,"Parse command-line args into instances of the specified dataclass types."),Kl.forEach(t),en=c(Ze),Ee=r(Ze,"P",{});var os=n(Ee);tn=l(os,"This relies on argparse\u2019s "),Bt=r(os,"CODE",{});var Yl=n(Bt);an=l(Yl,"ArgumentParser.parse_known_args"),Yl.forEach(t),sn=l(os,`. See the doc at: docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args`),os.forEach(t),Ze.forEach(t),rn=c(k),te=r(k,"DIV",{class:!0});var ls=n(te);v(De.$$.fragment,ls),nn=c(ls),je=r(ls,"P",{});var is=n(je);on=l(is,"Alternative helper method that does not use "),Kt=r(is,"CODE",{});var Wl=n(Kt);ln=l(Wl,"argparse"),Wl.forEach(t),cn=l(is,` at all, instead uses a dict and populating the dataclass types.`),is.forEach(t),ls.forEach(t),pn=c(k),ae=r(k,"DIV",{class:!0});var cs=n(ae);v(Pe.$$.fragment,cs),hn=c(cs),ke=r(cs,"P",{});var ps=n(ke);dn=l(ps,"Alternative helper method that does not use "),Yt=r(ps,"CODE",{});var Jl=n(Yt);un=l(Jl,"argparse"),Jl.forEach(t),fn=l(ps,` at all, instead loading a json file and populating the dataclass types.`),ps.forEach(t),cs.forEach(t),k.forEach(t),Va=c(a),F=r(a,"H2",{class:!0});var hs=n(F);se=r(hs,"A",{id:!0,class:!0,href:!0});var Ql=n(se);Wt=r(Ql,"SPAN",{});var Xl=n(Wt);v(Te.$$.fragment,Xl),Xl.forEach(t),Ql.forEach(t),mn=c(hs),Jt=r(hs,"SPAN",{});var Zl=n(Jt);gn=l(Zl,"Debug Utilities"),Zl.forEach(t),hs.forEach(t),Ma=c(a),p=r(a,"DIV",{class:!0});var h=n(p);v(xe.$$.fragment,h),bn=c(h),q=r(h,"P",{});var et=n(q);vn=l(et,`This debug class helps detect and understand where the model starts 
getting very large or very small, and more importantly `),Qt=r(et,"CODE",{});var ei=n(Qt);_n=l(ei,"nan"),ei.forEach(t),wn=l(et," or "),Xt=r(et,"CODE",{});var ti=n(Xt);$n=l(ti,"inf"),ti.forEach(t),yn=l(et," weight and activation elements."),et.forEach(t),En=c(h),Zt=r(h,"P",{});var ai=n(Zt);Dn=l(ai,"There are 2 working modes:"),ai.forEach(t),jn=c(h),Oe=r(h,"OL",{});var ds=n(Oe);ea=r(ds,"LI",{});var si=n(ea);Pn=l(si,"Underflow/overflow detection (default)"),si.forEach(t),kn=c(ds),ta=r(ds,"LI",{});var ri=n(ta);Tn=l(ri,"Specific batch absolute min/max tracing without detection"),ri.forEach(t),ds.forEach(t),xn=c(h),aa=r(h,"P",{});var ni=n(aa);On=l(ni,"Mode 1: Underflow/overflow detection"),ni.forEach(t),An=c(h),sa=r(h,"P",{});var oi=n(sa);Cn=l(oi,"To activate the underflow/overflow detection, initialize the object with the model :"),oi.forEach(t),Ln=c(h),v(Ae.$$.fragment,h),In=c(h),x=r(h,"P",{});var re=n(x);Un=l(re,"then run the training as normal and if "),ra=r(re,"CODE",{});var li=n(ra);zn=l(li,"nan"),li.forEach(t),Sn=l(re," or "),na=r(re,"CODE",{});var ii=n(na);Hn=l(ii,"inf"),ii.forEach(t),Nn=l(re,` gets detected in at least one of the weight, input or output elements this module will throw an exception and will print `),oa=r(re,"CODE",{});var ci=n(oa);Gn=l(ci,"max_frames_to_save"),ci.forEach(t),Vn=l(re,` frames that lead to this event, each frame reporting`),re.forEach(t),Mn=c(h),Ce=r(h,"OL",{});var us=n(Ce);Le=r(us,"LI",{});var fs=n(Le);Rn=l(fs,"the fully qualified module name plus the class name whose "),la=r(fs,"CODE",{});var pi=n(la);Fn=l(pi,"forward"),pi.forEach(t),qn=l(fs," was run"),fs.forEach(t),Bn=c(us),ia=r(us,"LI",{});var hi=n(ia);Kn=l(hi,"the absolute min and max value of all elements for each module weights, and the inputs and output"),hi.forEach(t),us.forEach(t),Yn=c(h),Ie=r(h,"P",{});var ms=n(Ie);Wn=l(ms,"For example, here is the header and the last few frames in detection report for "),ca=r(ms,"CODE",{});var 
di=n(ca);Jn=l(di,"google/mt5-small"),di.forEach(t),Qn=l(ms,` run in fp16 mixed precision :`),ms.forEach(t),Xn=c(h),v(Ue.$$.fragment,h),Zn=c(h),B=r(h,"P",{});var tt=n(B);eo=l(tt,"You can see here, that "),pa=r(tt,"CODE",{});var ui=n(pa);to=l(ui,"T5DenseGatedGeluDense.forward"),ui.forEach(t),ao=l(tt,` resulted in output activations, whose absolute max value was around 62.7K, which is very close to fp16\u2019s top limit of 64K. In the next frame we have `),ha=r(tt,"CODE",{});var fi=n(ha);so=l(fi,"Dropout"),fi.forEach(t),ro=l(tt,` which renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than 64K, and we get an overlow.`),tt.forEach(t),no=c(h),da=r(h,"P",{});var mi=n(da);oo=l(mi,`As you can see it\u2019s the previous frames that we need to look into when the numbers start going into very large for fp16 numbers.`),mi.forEach(t),lo=c(h),ze=r(h,"P",{});var gs=n(ze);io=l(gs,"The tracking is done in a forward hook, which gets invoked immediately after "),ua=r(gs,"CODE",{});var gi=n(ua);co=l(gi,"forward"),gi.forEach(t),po=l(gs," has completed."),gs.forEach(t),ho=c(h),fa=r(h,"P",{});var bi=n(fa);uo=l(bi,"By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :"),bi.forEach(t),fo=c(h),v(Se.$$.fragment,h),mo=c(h),ma=r(h,"P",{});var vi=n(ma);go=l(vi,`To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next section.`),vi.forEach(t),bo=c(h),ga=r(h,"P",{});var _i=n(ga);vo=l(_i,"Mode 2. 
Specific batch absolute min/max tracing without detection"),_i.forEach(t),_o=c(h),ba=r(h,"P",{});var wi=n(ba);wo=l(wi,"The second work mode is per-batch tracing with the underflow/overflow detection feature turned off."),wi.forEach(t),$o=c(h),He=r(h,"P",{});var bs=n(He);yo=l(bs,"Let\u2019s say you want to watch the absolute min and max values for all the ingredients of each "),va=r(bs,"CODE",{});var $i=n(va);Eo=l($i,"forward"),$i.forEach(t),Do=l(bs,` call of a given batch, and only do that for batches 1 and 3. Then you instantiate this class as :`),bs.forEach(t),jo=c(h),v(Ne.$$.fragment,h),Po=c(h),_a=r(h,"P",{});var yi=n(_a);ko=l(yi,"And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed."),yi.forEach(t),To=c(h),wa=r(h,"P",{});var Ei=n(wa);xo=l(Ei,`This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward right to that area.`),Ei.forEach(t),Oo=c(h),$a=r(h,"P",{});var Di=n($a);Ao=l(Di,"Early stopping:"),Di.forEach(t),Co=c(h),ya=r(h,"P",{});var ji=n(ya);Lo=l(ji,"You can also specify the batch number after which to stop the training, with :"),ji.forEach(t),Io=c(h),v(Ge.$$.fragment,h),Uo=c(h),Ea=r(h,"P",{});var Pi=n(Ea);zo=l(Pi,"This feature is mainly useful in the tracing mode, but you can use it for any mode."),Pi.forEach(t),So=c(h),Je=r(h,"P",{});var Xo=n(Je);Da=r(Xo,"STRONG",{});var ki=n(Da);Ho=l(ki,"Performance"),ki.forEach(t),No=l(Xo,":"),Xo.forEach(t),Go=c(h),K=r(h,"P",{});var at=n(K);Vo=l(at,"As this module measures absolute "),ja=r(at,"CODE",{});var Ti=n(ja);Mo=l(Ti,"min"),Ti.forEach(t),Ro=l(at,"/`"),Pa=r(at,"CODE",{});var xi=n(Pa);Fo=l(xi,"max"),xi.forEach(t),qo=l(at,` of each weight of the model on every forward it\u2019ll slow the training down. 
Therefore remember to turn it off once the debugging needs have been met.`),at.forEach(t),h.forEach(t),this.h()},h(){d(O,"name","hf:doc:metadata"),d(O,"content",JSON.stringify(zi)),d(T,"id","utilities-for-trainer"),d(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(T,"href","#utilities-for-trainer"),d(P,"class","relative group"),d(Me,"href","/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer"),d(W,"id","transformers.EvalPrediction"),d(W,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(W,"href","#transformers.EvalPrediction"),d(L,"class","relative group"),d(I,"class","docstring"),d(U,"class","docstring"),d(z,"class","docstring"),d(S,"class","docstring"),d(J,"id","transformers.trainer_callback.CallbackHandler"),d(J,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(J,"href","#transformers.trainer_callback.CallbackHandler"),d(H,"class","relative group"),d(N,"class","docstring"),d(Q,"id","transformers.trainer_pt_utils.DistributedTensorGatherer"),d(Q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Q,"href","#transformers.trainer_pt_utils.DistributedTensorGatherer"),d(G,"class","relative group"),d(X,"class","docstring"),d(Z,"class","docstring"),d(u,"class","docstring"),d(ee,"id","transformers.HfArgumentParser"),d(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),d(ee,"href","#transformers.HfArgumentParser"),d(R,"class","relative group"),d(A,"class","docstring"),d(te,"class","docstring"),d(ae,"class","docstring"),d(E,"class","docstring"),d(se,"id","transformers.debug_utils.DebugUnderflowOverflow"),d(se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(se,"href","#transformers.debug_utils.DebugUnderflowOverflow"),d(F,"class","relative group"),d(p,"class","docstring")},m(a,f){e(document.head,O),m(a,Ve,f),m(a,P,f),e(P,T),e(T,rt),_(ne,rt,null),e(P,_s),e(P,nt),e(nt,ws),m(a,Ta,f),m(a,Y,f),e(Y,$s),e(Y,Me),e(Me,ys),e(Y,Es),m(a,xa,f),m(a,Re,f),e(Re,Ds),m(a,Oa,f),m(a,L,f),e(L,W),e(W,ot),_(oe,ot,null),e(L,js),e(L,lt),e(lt,Ps),m(a,Aa,f),m(a,I,f),_(le,I,null),e(I,ks),e(I,it),e(it,Ts),m(a,Ca,f),m(a,U,f),_(ie,U,null),e(U,xs),e(U,ct),e(ct,Os),m(a,La,f),m(a,z,f),_(ce,z,null),e(z,As),e(z,D),e(D,Cs),e(D,pt),e(pt,Ls),e(D,Is),e(D,ht),e(ht,Us),e(D,zs),e(D,dt),e(dt,Ss),e(D,Hs),e(D,ut),e(ut,Ns),e(D,Gs),m(a,Ia,f),m(a,S,f),_(pe,S,null),e(S,Vs),e(S,ft),e(ft,Ms),m(a,Ua,f),m(a,H,f),e(H,J),e(J,mt),_(he,mt,null),e(H,Rs),e(H,gt),e(gt,Fs),m(a,za,f),m(a,N,f),_(de,N,null),e(N,qs),e(N,bt),e(bt,Bs),m(a,Sa,f),m(a,G,f),e(G,Q),e(Q,vt),_(ue,vt,null),e(G,Ks),e(G,_t),e(_t,Ys),m(a,Ha,f),m(a,u,f),_(fe,u,null),e(u,Ws),e(u,wt),e(wt,Js),e(u,Qs),e(u,$t),e($t,Xs),e(u,Zs),e(u,yt),e(yt,Et),e(Et,er),e(u,tr),e(u,Dt),e(Dt,ar),e(u,sr),e(u,V),e(V,Fe),e(Fe,rr),e(Fe,jt),e(jt,nr),e(V,or),e(V,qe),e(qe,lr),e(qe,Pt),e(Pt,ir),e(V,cr),e(V,Be),e(Be,pr),e(Be,kt),e(kt,hr),e(u,dr),e(u,Tt),e(Tt,ur),e(u,fr),e(u,M),e(M,Ke),e(Ke,mr),e(Ke,xt),e(xt,gr),e(M,br),e(M,Ye),e(Ye,vr),e(Ye,Ot),e(Ot,_r),e(M,wr),e(M,We),e(We,$r),e(We,At),e(At,yr),e(u,Er),e(u,Ct),e(Ct,Dr),e(u,jr),e(u,Lt),e(Lt,It),e(It,Pr),e(u,kr),e(u,Ut),e(Ut,Tr),e(u,xr),e(u,zt),e(zt,St),e(St,Or),e(u,Ar),e(u,Ht),e(Ht,Cr),e(u,Lr),e(u,X),_(me,X,null),e(X,Ir),e(X,ge),e(ge,Ur),e(ge,Nt),e(Nt,zr),e(g
e,Sr),e(u,Hr),e(u,Z),_(be,Z,null),e(Z,Nr),e(Z,Gt),e(Gt,Gr),m(a,Na,f),m(a,R,f),e(R,ee),e(ee,Vt),_(ve,Vt,null),e(R,Vr),e(R,Mt),e(Mt,Mr),m(a,Ga,f),m(a,E,f),_(_e,E,null),e(E,Rr),e(E,we),e(we,Fr),e(we,Rt),e(Rt,qr),e(we,Br),e(E,Kr),e(E,$e),e($e,Yr),e($e,Ft),e(Ft,Wr),e($e,Jr),e(E,Qr),e(E,A),_(ye,A,null),e(A,Xr),e(A,qt),e(qt,Zr),e(A,en),e(A,Ee),e(Ee,tn),e(Ee,Bt),e(Bt,an),e(Ee,sn),e(E,rn),e(E,te),_(De,te,null),e(te,nn),e(te,je),e(je,on),e(je,Kt),e(Kt,ln),e(je,cn),e(E,pn),e(E,ae),_(Pe,ae,null),e(ae,hn),e(ae,ke),e(ke,dn),e(ke,Yt),e(Yt,un),e(ke,fn),m(a,Va,f),m(a,F,f),e(F,se),e(se,Wt),_(Te,Wt,null),e(F,mn),e(F,Jt),e(Jt,gn),m(a,Ma,f),m(a,p,f),_(xe,p,null),e(p,bn),e(p,q),e(q,vn),e(q,Qt),e(Qt,_n),e(q,wn),e(q,Xt),e(Xt,$n),e(q,yn),e(p,En),e(p,Zt),e(Zt,Dn),e(p,jn),e(p,Oe),e(Oe,ea),e(ea,Pn),e(Oe,kn),e(Oe,ta),e(ta,Tn),e(p,xn),e(p,aa),e(aa,On),e(p,An),e(p,sa),e(sa,Cn),e(p,Ln),_(Ae,p,null),e(p,In),e(p,x),e(x,Un),e(x,ra),e(ra,zn),e(x,Sn),e(x,na),e(na,Hn),e(x,Nn),e(x,oa),e(oa,Gn),e(x,Vn),e(p,Mn),e(p,Ce),e(Ce,Le),e(Le,Rn),e(Le,la),e(la,Fn),e(Le,qn),e(Ce,Bn),e(Ce,ia),e(ia,Kn),e(p,Yn),e(p,Ie),e(Ie,Wn),e(Ie,ca),e(ca,Jn),e(Ie,Qn),e(p,Xn),_(Ue,p,null),e(p,Zn),e(p,B),e(B,eo),e(B,pa),e(pa,to),e(B,ao),e(B,ha),e(ha,so),e(B,ro),e(p,no),e(p,da),e(da,oo),e(p,lo),e(p,ze),e(ze,io),e(ze,ua),e(ua,co),e(ze,po),e(p,ho),e(p,fa),e(fa,uo),e(p,fo),_(Se,p,null),e(p,mo),e(p,ma),e(ma,go),e(p,bo),e(p,ga),e(ga,vo),e(p,_o),e(p,ba),e(ba,wo),e(p,$o),e(p,He),e(He,yo),e(He,va),e(va,Eo),e(He,Do),e(p,jo),_(Ne,p,null),e(p,Po),e(p,_a),e(_a,ko),e(p,To),e(p,wa),e(wa,xo),e(p,Oo),e(p,$a),e($a,Ao),e(p,Co),e(p,ya),e(ya,Lo),e(p,Io),_(Ge,p,null),e(p,Uo),e(p,Ea),e(Ea,zo),e(p,So),e(p,Je),e(Je,Da),e(Da,Ho),e(Je,No),e(p,Go),e(p,K),e(K,Vo),e(K,ja),e(ja,Mo),e(K,Ro),e(K,Pa),e(Pa,Fo),e(K,qo),Ra=!0},p:Ii,i(a){Ra||(w(ne.$$.fragment,a),w(oe.$$.fragment,a),w(le.$$.fragment,a),w(ie.$$.fragment,a),w(ce.$$.fragment,a),w(pe.$$.fragment,a),w(he.$$.fragment,a),w(de.$$.fragment,a),w(ue.$$.fragment,a),w(fe.$$.fragment,a),w(me.$$.fragment,a),w(be.$$.fragm
ent,a),w(ve.$$.fragment,a),w(_e.$$.fragment,a),w(ye.$$.fragment,a),w(De.$$.fragment,a),w(Pe.$$.fragment,a),w(Te.$$.fragment,a),w(xe.$$.fragment,a),w(Ae.$$.fragment,a),w(Ue.$$.fragment,a),w(Se.$$.fragment,a),w(Ne.$$.fragment,a),w(Ge.$$.fragment,a),Ra=!0)},o(a){$(ne.$$.fragment,a),$(oe.$$.fragment,a),$(le.$$.fragment,a),$(ie.$$.fragment,a),$(ce.$$.fragment,a),$(pe.$$.fragment,a),$(he.$$.fragment,a),$(de.$$.fragment,a),$(ue.$$.fragment,a),$(fe.$$.fragment,a),$(me.$$.fragment,a),$(be.$$.fragment,a),$(ve.$$.fragment,a),$(_e.$$.fragment,a),$(ye.$$.fragment,a),$(De.$$.fragment,a),$(Pe.$$.fragment,a),$(Te.$$.fragment,a),$(xe.$$.fragment,a),$(Ae.$$.fragment,a),$(Ue.$$.fragment,a),$(Se.$$.fragment,a),$(Ne.$$.fragment,a),$(Ge.$$.fragment,a),Ra=!1},d(a){t(O),a&&t(Ve),a&&t(P),y(ne),a&&t(Ta),a&&t(Y),a&&t(xa),a&&t(Re),a&&t(Oa),a&&t(L),y(oe),a&&t(Aa),a&&t(I),y(le),a&&t(Ca),a&&t(U),y(ie),a&&t(La),a&&t(z),y(ce),a&&t(Ia),a&&t(S),y(pe),a&&t(Ua),a&&t(H),y(he),a&&t(za),a&&t(N),y(de),a&&t(Sa),a&&t(G),y(ue),a&&t(Ha),a&&t(u),y(fe),y(me),y(be),a&&t(Na),a&&t(R),y(ve),a&&t(Ga),a&&t(E),y(_e),y(ye),y(De),y(Pe),a&&t(Va),a&&t(F),y(Te),a&&t(Ma),a&&t(p),y(xe),y(Ae),y(Ue),y(Se),y(Ne),y(Ge)}}}const zi={local:"utilities-for-trainer",sections:[{local:"transformers.EvalPrediction",title:"Utilities"},{local:"transformers.trainer_callback.CallbackHandler",title:"Callbacks internals"},{local:"transformers.trainer_pt_utils.DistributedTensorGatherer",title:"Distributed Evaluation"},{local:"transformers.HfArgumentParser",title:"Distributed Evaluation"},{local:"transformers.debug_utils.DebugUnderflowOverflow",title:"Debug Utilities"}],title:"Utilities for Trainer"};function Si(vs,O,Ve){let{fw:P}=O;return vs.$$set=T=>{"fw"in T&&Ve(0,P=T.fw)},[P]}class Ri extends Oi{constructor(O){super();Ai(this,O,Si,Ui,Ci,{fw:0})}}export{Ri as default,zi as metadata};
420
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/internal/modeling_utils.mdx-1b04e493.js
import{S as Ri,i as Ui,s as Wi,e as s,k as l,w as h,t as a,M as Xi,c as n,d as o,m as d,a as r,x as g,h as i,b as f,F as t,g as m,y as _,q as v,o as $,B as b}from"../../chunks/vendor-4833417e.js";import{T as oo}from"../../chunks/Tip-fffd6df1.js";import{D as T}from"../../chunks/Docstring-4f315ed9.js";import{C as Ji}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as so}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function Yi(D){let p,k,u,y,A,E,G,O,L,j,S,P,Q,F,C,B,x;return{c(){p=s("p"),k=a("One of "),u=s("code"),y=a("start_states"),A=a(" or "),E=s("code"),G=a("start_positions"),O=a(" should be not "),L=s("code"),j=a("None"),S=a(". If both are set, "),P=s("code"),Q=a("start_positions"),F=a(` overrides `),C=s("code"),B=a("start_states"),x=a(".")},l(q){p=n(q,"P",{});var w=r(p);k=i(w,"One of "),u=n(w,"CODE",{});var I=r(u);y=i(I,"start_states"),I.forEach(o),A=i(w," or "),E=n(w,"CODE",{});var fe=r(E);G=i(fe,"start_positions"),fe.forEach(o),O=i(w," should be not "),L=n(w,"CODE",{});var K=r(L);j=i(K,"None"),K.forEach(o),S=i(w,". If both are set, "),P=n(w,"CODE",{});var ue=r(P);Q=i(ue,"start_positions"),ue.forEach(o),F=i(w,` overrides `),C=n(w,"CODE",{});var ie=r(C);B=i(ie,"start_states"),ie.forEach(o),x=i(w,"."),w.forEach(o)},m(q,w){m(q,p,w),t(p,k),t(p,u),t(u,y),t(p,A),t(p,E),t(E,G),t(p,O),t(p,L),t(L,j),t(p,S),t(p,P),t(P,Q),t(p,F),t(p,C),t(C,B),t(p,x)},d(q){q&&o(p)}}}function Zi(D){let p,k,u,y,A,E,G,O,L,j,S,P,Q,F,C,B,x;return{c(){p=s("p"),k=a("One of "),u=s("code"),y=a("start_states"),A=a(" or "),E=s("code"),G=a("start_positions"),O=a(" should be not "),L=s("code"),j=a("None"),S=a(". 
If both are set, "),P=s("code"),Q=a("start_positions"),F=a(` overrides `),C=s("code"),B=a("start_states"),x=a(".")},l(q){p=n(q,"P",{});var w=r(p);k=i(w,"One of "),u=n(w,"CODE",{});var I=r(u);y=i(I,"start_states"),I.forEach(o),A=i(w," or "),E=n(w,"CODE",{});var fe=r(E);G=i(fe,"start_positions"),fe.forEach(o),O=i(w," should be not "),L=n(w,"CODE",{});var K=r(L);j=i(K,"None"),K.forEach(o),S=i(w,". If both are set, "),P=n(w,"CODE",{});var ue=r(P);Q=i(ue,"start_positions"),ue.forEach(o),F=i(w,` overrides `),C=n(w,"CODE",{});var ie=r(C);B=i(ie,"start_states"),ie.forEach(o),x=i(w,"."),w.forEach(o)},m(q,w){m(q,p,w),t(p,k),t(p,u),t(u,y),t(p,A),t(p,E),t(E,G),t(p,O),t(p,L),t(L,j),t(p,S),t(p,P),t(P,Q),t(p,F),t(p,C),t(C,B),t(p,x)},d(q){q&&o(p)}}}function el(D){let p,k;return{c(){p=s("p"),k=a("This API is experimental and may have some slight breaking changes in the next releases.")},l(u){p=n(u,"P",{});var y=r(p);k=i(y,"This API is experimental and may have some slight breaking changes in the next releases."),y.forEach(o)},m(u,y){m(u,p,y),t(p,k)},d(u){u&&o(p)}}}function tl(D){let p,k;return{c(){p=s("p"),k=a("Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.")},l(u){p=n(u,"P",{});var y=r(p);k=i(y,"Any label of -100 will be ignored (along with the corresponding logits) in the loss computation."),y.forEach(o)},m(u,y){m(u,p,y),t(p,k)},d(u){u&&o(p)}}}function ol(D){let p,k;return{c(){p=s("p"),k=a("Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.")},l(u){p=n(u,"P",{});var y=r(p);k=i(y,"Any label of -100 will be ignored (along with the corresponding logits) in the loss computation."),y.forEach(o)},m(u,y){m(u,p,y),t(p,k)},d(u){u&&o(p)}}}function sl(D){let p,k;return{c(){p=s("p"),k=a("Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.")},l(u){p=n(u,"P",{});var y=r(p);k=i(y,"Any label of -100 will be ignored (along with the corresponding logits) in 
the loss computation."),y.forEach(o)},m(u,y){m(u,p,y),t(p,k)},d(u){u&&o(p)}}}function nl(D){let p,k,u,y,A,E,G,O,L,j,S,P,Q,F,C,B,x,q,w,I,fe,K,ue,ie,R,Ge,nn,no,rn,an,ro,ln,ps,U,Ke,dn,ao,cn,pn,St,Re,ms,W,Ue,mn,io,fn,un,Pe,We,hn,Ce,fs,X,Xe,gn,lo,_n,vn,ze,Je,$n,Se,us,he,Ye,bn,Ze,yn,Ft,wn,Tn,hs,J,et,kn,co,qn,En,At,tt,gs,Y,ot,xn,po,Dn,Ln,Fe,st,Pn,mo,Cn,_s,ge,Ae,fo,nt,zn,uo,Sn,vs,z,rt,Fn,N,An,ho,On,In,go,Nn,Hn,_o,Mn,Vn,vo,jn,Qn,Bn,H,Gn,$o,Kn,Rn,bo,Un,Wn,yo,Xn,Jn,wo,Yn,Zn,er,To,tr,or,at,$s,_e,it,sr,lt,nr,ko,rr,ar,bs,Z,dt,ir,qo,lr,dr,Eo,cr,ys,ee,ct,pr,xo,mr,fr,Do,ur,ws,te,pt,hr,Lo,gr,_r,Po,vr,Ts,ve,Oe,Co,mt,$r,zo,br,ks,oe,ft,yr,So,wr,Tr,Fo,kr,qs,M,ut,qr,Ao,Er,xr,Oo,Dr,Lr,le,ht,Pr,Io,Cr,zr,gt,Sr,_t,Fr,Ar,Es,se,vt,Or,No,Ir,Nr,de,$t,Hr,Ho,Mr,Vr,Ie,xs,$e,Ne,Mo,bt,jr,Vo,Qr,Ds,ne,yt,Br,jo,Gr,Kr,He,Ls,re,wt,Rr,Qo,Ur,Wr,Me,Ps,be,Tt,Xr,Bo,Jr,Cs,ye,kt,Yr,Go,Zr,zs,we,qt,ea,Ko,ta,Ss,ae,Et,oa,Ro,sa,na,Ve,Fs,Te,je,Uo,xt,ra,Wo,aa,As,ke,Dt,ia,Lt,la,Xo,da,ca,Os,V,Pt,pa,Jo,ma,fa,Yo,ua,ha,qe,Ee,ga,Zo,_a,va,es,$a,ba,ya,xe,wa,ts,Ta,ka,os,qa,Ea,xa,De,Da,ss,La,Pa,ns,Ca,za,Is,Le,Ct,Sa,rs,Fa,Ns;return E=new so({}),I=new so({}),Ge=new T({props:{name:"class transformers.Conv1D",anchor:"transformers.Conv1D",parameters:[{name:"nf",val:""},{name:"nx",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1851",parametersDescription:[{anchor:"transformers.Conv1D.nf",description:"<strong>nf</strong> (<code>int</code>) &#x2014; The number of output features.",name:"nf"},{anchor:"transformers.Conv1D.nx",description:"<strong>nx</strong> (<code>int</code>) &#x2014; The number of input features.",name:"nx"}]}}),Ke=new T({props:{name:"class transformers.modeling_utils.PoolerStartLogits",anchor:"transformers.modeling_utils.PoolerStartLogits",parameters:[{name:"config",val:": 
PretrainedConfig"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1877",parametersDescription:[{anchor:"transformers.modeling_utils.PoolerStartLogits.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model.`,name:"config"}]}}),Re=new T({props:{name:"forward",anchor:"transformers.modeling_utils.PoolerStartLogits.forward",parameters:[{name:"hidden_states",val:": FloatTensor"},{name:"p_mask",val:": typing.Optional[torch.FloatTensor] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1890",parametersDescription:[{anchor:"transformers.modeling_utils.PoolerStartLogits.forward.hidden_states",description:`<strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; The final hidden states of the model.`,name:"hidden_states"},{anchor:"transformers.modeling_utils.PoolerStartLogits.forward.p_mask",description:`<strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 
1.0 means token should be masked.`,name:"p_mask"}],returnDescription:` <p>The start logits for SQuAD.</p> `,returnType:` <p><code>torch.FloatTensor</code></p> `}}),Ue=new T({props:{name:"class transformers.modeling_utils.PoolerEndLogits",anchor:"transformers.modeling_utils.PoolerEndLogits",parameters:[{name:"config",val:": PretrainedConfig"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1915",parametersDescription:[{anchor:"transformers.modeling_utils.PoolerEndLogits.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model and the <code>layer_norm_eps</code> to use.`,name:"config"}]}}),We=new T({props:{name:"forward",anchor:"transformers.modeling_utils.PoolerEndLogits.forward",parameters:[{name:"hidden_states",val:": FloatTensor"},{name:"start_states",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"p_mask",val:": typing.Optional[torch.FloatTensor] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1932",parametersDescription:[{anchor:"transformers.modeling_utils.PoolerEndLogits.forward.hidden_states",description:`<strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; The final hidden states of the model.`,name:"hidden_states"},{anchor:"transformers.modeling_utils.PoolerEndLogits.forward.start_states",description:`<strong>start_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>, <em>optional</em>) &#x2014; The hidden states of the first tokens for the labeled 
span.`,name:"start_states"},{anchor:"transformers.modeling_utils.PoolerEndLogits.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; The position of the first token for the labeled span.`,name:"start_positions"},{anchor:"transformers.modeling_utils.PoolerEndLogits.forward.p_mask",description:`<strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked.`,name:"p_mask"}],returnDescription:` <p>The end logits for SQuAD.</p> `,returnType:` <p><code>torch.FloatTensor</code></p> `}}),Ce=new oo({props:{$$slots:{default:[Yi]},$$scope:{ctx:D}}}),Xe=new T({props:{name:"class transformers.modeling_utils.PoolerAnswerClass",anchor:"transformers.modeling_utils.PoolerAnswerClass",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1984",parametersDescription:[{anchor:"transformers.modeling_utils.PoolerAnswerClass.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model.`,name:"config"}]}}),Je=new T({props:{name:"forward",anchor:"transformers.modeling_utils.PoolerAnswerClass.forward",parameters:[{name:"hidden_states",val:": FloatTensor"},{name:"start_states",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"cls_index",val:": typing.Optional[torch.LongTensor] = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1999",parametersDescription:[{anchor:"transformers.modeling_utils.PoolerAnswerClass.forward.hidden_states",description:`<strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; The final hidden states of the model.`,name:"hidden_states"},{anchor:"transformers.modeling_utils.PoolerAnswerClass.forward.start_states",description:`<strong>start_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>, <em>optional</em>) &#x2014; The hidden states of the first tokens for the labeled span.`,name:"start_states"},{anchor:"transformers.modeling_utils.PoolerAnswerClass.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; The position of the first token for the labeled span.`,name:"start_positions"},{anchor:"transformers.modeling_utils.PoolerAnswerClass.forward.cls_index",description:`<strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Position of the CLS token for each sentence in the batch. 
If <code>None</code>, takes the last token.`,name:"cls_index"}],returnDescription:` <p>The SQuAD 2.0 answer class.</p> `,returnType:` <p><code>torch.FloatTensor</code></p> `}}),Se=new oo({props:{$$slots:{default:[Zi]},$$scope:{ctx:D}}}),Ye=new T({props:{name:"class transformers.modeling_utils.SquadHeadOutput",anchor:"transformers.modeling_utils.SquadHeadOutput",parameters:[{name:"loss",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_top_log_probs",val:": typing.Optional[torch.FloatTensor] = None"},{name:"start_top_index",val:": typing.Optional[torch.LongTensor] = None"},{name:"end_top_log_probs",val:": typing.Optional[torch.FloatTensor] = None"},{name:"end_top_index",val:": typing.Optional[torch.LongTensor] = None"},{name:"cls_logits",val:": typing.Optional[torch.FloatTensor] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2050",parametersDescription:[{anchor:"transformers.modeling_utils.SquadHeadOutput.loss",description:`<strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) &#x2014; Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.`,name:"loss"},{anchor:"transformers.modeling_utils.SquadHeadOutput.start_top_log_probs",description:`<strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top config.start_n_top start token possibilities (beam-search).`,name:"start_top_log_probs"},{anchor:"transformers.modeling_utils.SquadHeadOutput.start_top_index",description:`<strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, 
<em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top config.start_n_top start token possibilities (beam-search).`,name:"start_top_index"},{anchor:"transformers.modeling_utils.SquadHeadOutput.end_top_log_probs",description:`<strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).`,name:"end_top_log_probs"},{anchor:"transformers.modeling_utils.SquadHeadOutput.end_top_index",description:`<strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).`,name:"end_top_index"},{anchor:"transformers.modeling_utils.SquadHeadOutput.cls_logits",description:`<strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the <code>is_impossible</code> label of the answers.`,name:"cls_logits"}]}}),et=new T({props:{name:"class transformers.modeling_utils.SQuADHead",anchor:"transformers.modeling_utils.SQuADHead",parameters:[{name:"config",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2080",parametersDescription:[{anchor:"transformers.modeling_utils.SQuADHead.config",description:`<strong>config</strong> (<a 
href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model and the <code>layer_norm_eps</code> to use.`,name:"config"}]}}),tt=new T({props:{name:"forward",anchor:"transformers.modeling_utils.SQuADHead.forward",parameters:[{name:"hidden_states",val:": FloatTensor"},{name:"start_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"end_positions",val:": typing.Optional[torch.LongTensor] = None"},{name:"cls_index",val:": typing.Optional[torch.LongTensor] = None"},{name:"is_impossible",val:": typing.Optional[torch.LongTensor] = None"},{name:"p_mask",val:": typing.Optional[torch.FloatTensor] = None"},{name:"return_dict",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2099",parametersDescription:[{anchor:"transformers.modeling_utils.SQuADHead.forward.hidden_states",description:`<strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; Final hidden states of the model on the sequence tokens.`,name:"hidden_states"},{anchor:"transformers.modeling_utils.SQuADHead.forward.start_positions",description:`<strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Positions of the first token for the labeled span.`,name:"start_positions"},{anchor:"transformers.modeling_utils.SQuADHead.forward.end_positions",description:`<strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Positions of the last token for the labeled span.`,name:"end_positions"},{anchor:"transformers.modeling_utils.SQuADHead.forward.cls_index",description:`<strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, 
<em>optional</em>) &#x2014; Position of the CLS token for each sentence in the batch. If <code>None</code>, takes the last token.`,name:"cls_index"},{anchor:"transformers.modeling_utils.SQuADHead.forward.is_impossible",description:`<strong>is_impossible</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Whether the question has a possible answer in the paragraph or not.`,name:"is_impossible"},{anchor:"transformers.modeling_utils.SQuADHead.forward.p_mask",description:`<strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked.`,name:"p_mask"},{anchor:"transformers.modeling_utils.SQuADHead.forward.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.`,name:"return_dict"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.modeling_utils.SquadHeadOutput" >transformers.modeling_utils.SquadHeadOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.configuration_utils.PretrainedConfig'&gt;</code>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) \u2014 Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.</li> 
<li><strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the top config.start_n_top start token possibilities (beam-search).</li> <li><strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Indices for the top config.start_n_top start token possibilities (beam-search).</li> <li><strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</li> <li><strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</li> <li><strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) \u2014 Log probabilities for the <code>is_impossible</code> label of the answers.</li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.modeling_utils.SquadHeadOutput" >transformers.modeling_utils.SquadHeadOutput</a> or <code>tuple(torch.FloatTensor)</code></p> `}}),ot=new T({props:{name:"class 
transformers.modeling_utils.SequenceSummary",anchor:"transformers.modeling_utils.SequenceSummary",parameters:[{name:"config",val:": PretrainedConfig"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2197",parametersDescription:[{anchor:"transformers.modeling_utils.SequenceSummary.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses):</p> <ul> <li> <p><strong>summary_type</strong> (<code>str</code>) &#x2014; The method to use to make this summary. Accepted values are:</p> <ul> <li><code>&quot;last&quot;</code> &#x2014; Take the last token hidden state (like XLNet)</li> <li><code>&quot;first&quot;</code> &#x2014; Take the first token hidden state (like Bert)</li> <li><code>&quot;mean&quot;</code> &#x2014; Take the mean of all tokens hidden states</li> <li><code>&quot;cls_index&quot;</code> &#x2014; Supply a Tensor of classification token position (GPT/GPT-2)</li> <li><code>&quot;attn&quot;</code> &#x2014; Not implemented now, use multi-head attention</li> </ul> </li> <li> <p><strong>summary_use_proj</strong> (<code>bool</code>) &#x2014; Add a projection after the vector extraction.</p> </li> <li> <p><strong>summary_proj_to_labels</strong> (<code>bool</code>) &#x2014; If <code>True</code>, the projection outputs to <code>config.num_labels</code> classes (otherwise to <code>config.hidden_size</code>).</p> </li> <li> <p><strong>summary_activation</strong> (<code>Optional[str]</code>) &#x2014; Set to <code>&quot;tanh&quot;</code> to add a tanh activation to the output, another string or <code>None</code> will add no activation.</p> </li> <li> <p><strong>summary_first_dropout</strong> (<code>float</code>) &#x2014; Optional dropout 
probability before the projection and activation.</p> </li> <li> <p><strong>summary_last_dropout</strong> (<code>float</code>)&#x2014; Optional dropout probability after the projection and activation.</p> </li> </ul>`,name:"config"}]}}),st=new T({props:{name:"forward",anchor:"transformers.modeling_utils.SequenceSummary.forward",parameters:[{name:"hidden_states",val:": FloatTensor"},{name:"cls_index",val:": typing.Optional[torch.LongTensor] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2252",parametersDescription:[{anchor:"transformers.modeling_utils.SequenceSummary.forward.hidden_states",description:`<strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>[batch_size, seq_len, hidden_size]</code>) &#x2014; The hidden states of the last layer.`,name:"hidden_states"},{anchor:"transformers.modeling_utils.SequenceSummary.forward.cls_index",description:`<strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>[batch_size]</code> or <code>[batch_size, ...]</code> where &#x2026; are optional leading dimensions of <code>hidden_states</code>, <em>optional</em>) &#x2014; Used if <code>summary_type == &quot;cls_index&quot;</code> and takes the last token of the sequence as classification token.`,name:"cls_index"}],returnDescription:` <p>The summary of the sequence hidden states.</p> `,returnType:` <p><code>torch.FloatTensor</code></p> `}}),nt=new so({}),rt=new T({props:{name:"transformers.apply_chunking_to_forward",anchor:"transformers.apply_chunking_to_forward",parameters:[{name:"forward_fn",val:": typing.Callable[..., torch.Tensor]"},{name:"chunk_size",val:": int"},{name:"chunk_dim",val:": int"},{name:"*input_tensors",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2401",parametersDescription:[{anchor:"transformers.apply_chunking_to_forward.forward_fn",description:`<strong>forward_fn</strong> 
(<code>Callable[..., torch.Tensor]</code>) &#x2014; The forward function of the model.`,name:"forward_fn"},{anchor:"transformers.apply_chunking_to_forward.chunk_size",description:`<strong>chunk_size</strong> (<code>int</code>) &#x2014; The chunk size of a chunked tensor: <code>num_chunks = len(input_tensors[0]) / chunk_size</code>.`,name:"chunk_size"},{anchor:"transformers.apply_chunking_to_forward.chunk_dim",description:`<strong>chunk_dim</strong> (<code>int</code>) &#x2014; The dimension over which the <code>input_tensors</code> should be chunked.`,name:"chunk_dim"},{anchor:"transformers.apply_chunking_to_forward.input_tensors",description:`<strong>input_tensors</strong> (<code>Tuple[torch.Tensor]</code>) &#x2014; The input tensors of <code>forward_fn</code> which will be chunked`,name:"input_tensors"}],returnDescription:` <p>A tensor with the same shape as the <code>forward_fn</code> would have given if applied\`.</p> `,returnType:` <p><code>torch.Tensor</code></p> `}}),at=new Ji({props:{code:`# rename the usual forward() fn to forward_chunk() def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states # implement a chunked forward function def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)`,highlighted:`<span class="hljs-comment"># rename the usual forward() fn to forward_chunk()</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward_chunk</span>(<span class="hljs-params">self, hidden_states</span>): hidden_states = self.decoder(hidden_states) <span class="hljs-keyword">return</span> hidden_states <span class="hljs-comment"># implement a chunked forward function</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward</span>(<span class="hljs-params">self, hidden_states</span>): <span class="hljs-keyword">return</span> 
apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)`}}),it=new T({props:{name:"transformers.modeling_utils.find_pruneable_heads_and_indices",anchor:"transformers.modeling_utils.find_pruneable_heads_and_indices",parameters:[{name:"heads",val:": typing.List[int]"},{name:"n_heads",val:": int"},{name:"head_size",val:": int"},{name:"already_pruned_heads",val:": typing.Set[int]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L95",parametersDescription:[{anchor:"transformers.modeling_utils.find_pruneable_heads_and_indices.heads",description:"<strong>heads</strong> (<code>List[int]</code>) &#x2014; List of the indices of heads to prune.",name:"heads"},{anchor:"transformers.modeling_utils.find_pruneable_heads_and_indices.n_heads",description:"<strong>n_heads</strong> (<code>int</code>) &#x2014; The number of heads in the model.",name:"n_heads"},{anchor:"transformers.modeling_utils.find_pruneable_heads_and_indices.head_size",description:"<strong>head_size</strong> (<code>int</code>) &#x2014; The size of each head.",name:"head_size"},{anchor:"transformers.modeling_utils.find_pruneable_heads_and_indices.already_pruned_heads",description:"<strong>already_pruned_heads</strong> (<code>Set[int]</code>) &#x2014; A set of already pruned heads.",name:"already_pruned_heads"}],returnDescription:` <p>A tuple with the remaining heads and their corresponding indices.</p> `,returnType:` <p><code>Tuple[Set[int], torch.LongTensor]</code></p> `}}),dt=new T({props:{name:"transformers.prune_layer",anchor:"transformers.prune_layer",parameters:[{name:"layer",val:": typing.Union[torch.nn.modules.linear.Linear, transformers.modeling_utils.Conv1D]"},{name:"index",val:": LongTensor"},{name:"dim",val:": typing.Optional[int] = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2377",parametersDescription:[{anchor:"transformers.prune_layer.layer",description:"<strong>layer</strong> (<code>Union[torch.nn.Linear, Conv1D]</code>) &#x2014; The layer to prune.",name:"layer"},{anchor:"transformers.prune_layer.index",description:"<strong>index</strong> (<code>torch.LongTensor</code>) &#x2014; The indices to keep in the layer.",name:"index"},{anchor:"transformers.prune_layer.dim",description:"<strong>dim</strong> (<code>int</code>, <em>optional</em>) &#x2014; The dimension on which to keep the indices.",name:"dim"}],returnDescription:` <p>The pruned layer as a new layer with <code>requires_grad=True</code>.</p> `,returnType:` <p><code>torch.nn.Linear</code> or <a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.Conv1D" >Conv1D</a></p> `}}),ct=new T({props:{name:"transformers.modeling_utils.prune_conv1d_layer",anchor:"transformers.modeling_utils.prune_conv1d_layer",parameters:[{name:"layer",val:": Conv1D"},{name:"index",val:": LongTensor"},{name:"dim",val:": int = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2344",parametersDescription:[{anchor:"transformers.modeling_utils.prune_conv1d_layer.layer",description:'<strong>layer</strong> (<a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.Conv1D">Conv1D</a>) &#x2014; The layer to prune.',name:"layer"},{anchor:"transformers.modeling_utils.prune_conv1d_layer.index",description:"<strong>index</strong> (<code>torch.LongTensor</code>) &#x2014; The indices to keep in the layer.",name:"index"},{anchor:"transformers.modeling_utils.prune_conv1d_layer.dim",description:"<strong>dim</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The dimension on which to keep the indices.",name:"dim"}],returnDescription:` <p>The pruned layer as a new layer with 
<code>requires_grad=True</code>.</p> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.Conv1D" >Conv1D</a></p> `}}),pt=new T({props:{name:"transformers.modeling_utils.prune_linear_layer",anchor:"transformers.modeling_utils.prune_linear_layer",parameters:[{name:"layer",val:": Linear"},{name:"index",val:": LongTensor"},{name:"dim",val:": int = 0"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2310",parametersDescription:[{anchor:"transformers.modeling_utils.prune_linear_layer.layer",description:"<strong>layer</strong> (<code>torch.nn.Linear</code>) &#x2014; The layer to prune.",name:"layer"},{anchor:"transformers.modeling_utils.prune_linear_layer.index",description:"<strong>index</strong> (<code>torch.LongTensor</code>) &#x2014; The indices to keep in the layer.",name:"index"},{anchor:"transformers.modeling_utils.prune_linear_layer.dim",description:"<strong>dim</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The dimension on which to keep the indices.",name:"dim"}],returnDescription:` <p>The pruned layer as a new layer with <code>requires_grad=True</code>.</p> `,returnType:` <p><code>torch.nn.Linear</code></p> `}}),mt=new so({}),ft=new T({props:{name:"class transformers.modeling_tf_utils.TFConv1D",anchor:"transformers.modeling_tf_utils.TFConv1D",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1819",parametersDescription:[{anchor:"transformers.modeling_tf_utils.TFConv1D.nf",description:`<strong>nf</strong> (<code>int</code>) &#x2014; The number of output features.`,name:"nf"},{anchor:"transformers.modeling_tf_utils.TFConv1D.nx",description:`<strong>nx</strong> (<code>int</code>) &#x2014; The number of input 
features.`,name:"nx"},{anchor:"transformers.modeling_tf_utils.TFConv1D.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation to use to initialize the weights. kwargs &#x2014; Additional keyword arguments passed along to the <code>__init__</code> of <code>tf.keras.layers.Layer</code>.`,name:"initializer_range"}]}}),ut=new T({props:{name:"class transformers.TFSharedEmbeddings",anchor:"transformers.TFSharedEmbeddings",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1859",parametersDescription:[{anchor:"transformers.TFSharedEmbeddings.vocab_size",description:`<strong>vocab_size</strong> (<code>int</code>) &#x2014; The size of the vocabulary, e.g., the number of unique tokens.`,name:"vocab_size"},{anchor:"transformers.TFSharedEmbeddings.hidden_size",description:`<strong>hidden_size</strong> (<code>int</code>) &#x2014; The size of the embedding vectors.`,name:"hidden_size"},{anchor:"transformers.TFSharedEmbeddings.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, <em>optional</em>) &#x2014; The standard deviation to use when initializing the weights. 
If no value is provided, it will default to {@html &quot;<span class="\\&quot;katex\\&quot;"><span class="\\&quot;katex-mathml\\&quot;"><math xmlns="\\&quot;http://www.w3.org/1998/Math/MathML\\&quot;"><semantics><mrow><mn>1</mn><mi mathvariant="\\&quot;normal\\&quot;">/</mi><msqrt><mrow><mi>h</mi><mi>i</mi><mi>d</mi><mi>d</mi><mi>e</mi><mi>n</mi><mi mathvariant="\\&quot;normal\\&quot;">_</mi><mi>s</mi><mi>i</mi><mi>z</mi><mi>e</mi></mrow></msqrt></mrow><annotation encoding="\\&quot;application/x-tex\\&quot;">1/\\\\sqrt{hidden\\\\_size}</annotation></semantics></math></span><span class="\\&quot;katex-html\\&quot;" aria-hidden="\\&quot;true\\&quot;"><span class="\\&quot;base\\&quot;"><span class="\\&quot;strut\\&quot;" style="\\&quot;height:1.24em;vertical-align:-0.3628em;\\&quot;"></span><span class="\\&quot;mord\\&quot;">1/</span><span class="\\&quot;mord" sqrt\\"><span class="\\&quot;vlist-t" vlist-t2\\"><span class="\\&quot;vlist-r\\&quot;"><span class="\\&quot;vlist\\&quot;" style="\\&quot;height:0.8772em;\\&quot;"><span class="\\&quot;svg-align\\&quot;" style="\\&quot;top:-3.2em;\\&quot;"><span class="\\&quot;pstrut\\&quot;" style="\\&quot;height:3.2em;\\&quot;"></span><span class="\\&quot;mord\\&quot;" style="\\&quot;padding-left:1em;\\&quot;"><span class="\\&quot;mord" mathnormal\\">hi</span><span class="\\&quot;mord" mathnormal\\">dd</span><span class="\\&quot;mord" mathnormal\\">e</span><span class="\\&quot;mord" mathnormal\\">n</span><span class="\\&quot;mord\\&quot;" style="\\&quot;margin-right:0.02778em;\\&quot;">_</span><span class="\\&quot;mord" mathnormal\\">s</span><span class="\\&quot;mord" mathnormal\\">i</span><span class="\\&quot;mord" mathnormal\\">ze</span></span></span><span style="\\&quot;top:-2.8372em;\\&quot;"><span class="\\&quot;pstrut\\&quot;" style="\\&quot;height:3.2em;\\&quot;"></span><span class="\\&quot;hide-tail\\&quot;" style="\\&quot;min-width:1.02em;height:1.28em;\\&quot;"><svg xmlns="\\&quot;http://www.w3.org/2000/svg\\&quot;" 
width="400em" height="1.28em" viewBox="0 0 400000 1296" preserveAspectRatio="xMinYMin slice"><path d="M263,681c0.7,0,18,39.7,52,119\\nc34,79.3,68.167,158.7,102.5,238c34.3,79.3,51.8,119.3,52.5,120\\nc340,-704.7,510.7,-1060.3,512,-1067\\nl0 -0\\nc4.7,-7.3,11,-11,19,-11\\nH40000v40H1012.3\\ns-271.3,567,-271.3,567c-38.7,80.7,-84,175,-136,283c-52,108,-89.167,185.3,-111.5,232\\nc-22.3,46.7,-33.8,70.3,-34.5,71c-4.7,4.7,-12.3,7,-23,7s-12,-1,-12,-1\\ns-109,-253,-109,-253c-72.7,-168,-109.3,-252,-110,-252c-10.7,8,-22,16.7,-34,26\\nc-22,17.3,-33.3,26,-34,26s-26,-26,-26,-26s76,-59,76,-59s76,-60,76,-60z\\nM1001 80h400000v40h-400000z"/></svg></span></span></span><span class="\\&quot;vlist-s\\&quot;">&#x200B;</span></span><span class="\\&quot;vlist-r\\&quot;"><span class="\\&quot;vlist\\&quot;" style="\\&quot;height:0.3628em;\\&quot;"><span></span></span></span></span></span></span></span></span>&quot;}. kwargs &#x2014; Additional keyword arguments passed along to the <code>__init__</code> of <code>tf.keras.layers.Layer</code>.`,name:"initializer_range"}]}}),ht=new T({props:{name:"call",anchor:"transformers.TFSharedEmbeddings.call",parameters:[{name:"inputs",val:": Tensor"},{name:"mode",val:": str = 'embedding'"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1904",parametersDescription:[{anchor:"transformers.TFSharedEmbeddings.call.inputs",description:`<strong>inputs</strong> (<code>tf.Tensor</code>) &#x2014; In embedding mode, should be an int64 tensor with shape <code>[batch_size, length]</code>.</p> <p>In linear mode, should be a float tensor with shape <code>[batch_size, length, hidden_size]</code>.`,name:"inputs"},{anchor:"transformers.TFSharedEmbeddings.call.mode",description:`<strong>mode</strong> (<code>str</code>, defaults to <code>&quot;embedding&quot;</code>) &#x2014; A valid value is either <code>&quot;embedding&quot;</code> or <code>&quot;linear&quot;</code>, the first one indicates that the layer should be 
used as an embedding layer, the second one that the layer should be used as a linear decoder.`,name:"mode"}],returnDescription:` <p>In embedding mode, the output is a float32 embedding tensor, with shape <code>[batch_size, length, embedding_size]</code>.</p> <p>In linear mode, the output is a float32 with shape <code>[batch_size, length, vocab_size]</code>.</p> `,returnType:` <p><code>tf.Tensor</code></p> `}}),vt=new T({props:{name:"class transformers.TFSequenceSummary",anchor:"transformers.TFSequenceSummary",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1957",parametersDescription:[{anchor:"transformers.TFSequenceSummary.config",description:`<strong>config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses):</p> <ul> <li> <p><strong>summary_type</strong> (<code>str</code>) &#x2014; The method to use to make this summary. 
Accepted values are:</p> <ul> <li><code>&quot;last&quot;</code> &#x2014; Take the last token hidden state (like XLNet)</li> <li><code>&quot;first&quot;</code> &#x2014; Take the first token hidden state (like Bert)</li> <li><code>&quot;mean&quot;</code> &#x2014; Take the mean of all tokens hidden states</li> <li><code>&quot;cls_index&quot;</code> &#x2014; Supply a Tensor of classification token position (GPT/GPT-2)</li> <li><code>&quot;attn&quot;</code> &#x2014; Not implemented now, use multi-head attention</li> </ul> </li> <li> <p><strong>summary_use_proj</strong> (<code>bool</code>) &#x2014; Add a projection after the vector extraction.</p> </li> <li> <p><strong>summary_proj_to_labels</strong> (<code>bool</code>) &#x2014; If <code>True</code>, the projection outputs to <code>config.num_labels</code> classes (otherwise to <code>config.hidden_size</code>).</p> </li> <li> <p><strong>summary_activation</strong> (<code>Optional[str]</code>) &#x2014; Set to <code>&quot;tanh&quot;</code> to add a tanh activation to the output, another string or <code>None</code> will add no activation.</p> </li> <li> <p><strong>summary_first_dropout</strong> (<code>float</code>) &#x2014; Optional dropout probability before the projection and activation.</p> </li> <li> <p><strong>summary_last_dropout</strong> (<code>float</code>)&#x2014; Optional dropout probability after the projection and activation.</p> </li> </ul>`,name:"config"},{anchor:"transformers.TFSequenceSummary.initializer_range",description:`<strong>initializer_range</strong> (<code>float</code>, defaults to 0.02) &#x2014; The standard deviation to use to initialize the weights. 
kwargs &#x2014; Additional keyword arguments passed along to the <code>__init__</code> of <code>tf.keras.layers.Layer</code>.`,name:"initializer_range"}]}}),$t=new T({props:{name:"register_for_auto_class",anchor:"transformers.TFSequenceSummary.register_for_auto_class",parameters:[{name:"auto_class",val:" = 'TFAutoModel'"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L2072",parametersDescription:[{anchor:"transformers.TFSequenceSummary.register_for_auto_class.auto_class",description:`<strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;TFAutoModel&quot;</code>) &#x2014; The auto class to register this new model with.`,name:"auto_class"}]}}),Ie=new oo({props:{warning:"&lcub;true}",$$slots:{default:[el]},$$scope:{ctx:D}}}),bt=new so({}),yt=new T({props:{name:"class transformers.modeling_tf_utils.TFCausalLanguageModelingLoss",anchor:"transformers.modeling_tf_utils.TFCausalLanguageModelingLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L170"}}),He=new oo({props:{$$slots:{default:[tl]},$$scope:{ctx:D}}}),wt=new T({props:{name:"class transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss",anchor:"transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L261"}}),Me=new oo({props:{$$slots:{default:[ol]},$$scope:{ctx:D}}}),Tt=new T({props:{name:"class transformers.modeling_tf_utils.TFMultipleChoiceLoss",anchor:"transformers.modeling_tf_utils.TFMultipleChoiceLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L251"}}),kt=new T({props:{name:"class 
transformers.modeling_tf_utils.TFQuestionAnsweringLoss",anchor:"transformers.modeling_tf_utils.TFQuestionAnsweringLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L192"}}),qt=new T({props:{name:"class transformers.modeling_tf_utils.TFSequenceClassificationLoss",anchor:"transformers.modeling_tf_utils.TFSequenceClassificationLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L235"}}),Et=new T({props:{name:"class transformers.modeling_tf_utils.TFTokenClassificationLoss",anchor:"transformers.modeling_tf_utils.TFTokenClassificationLoss",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L207"}}),Ve=new oo({props:{$$slots:{default:[sl]},$$scope:{ctx:D}}}),xt=new so({}),Dt=new T({props:{name:"transformers.modeling_tf_utils.get_initializer",anchor:"transformers.modeling_tf_utils.get_initializer",parameters:[{name:"initializer_range",val:": float = 0.02"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L2099",parametersDescription:[{anchor:"transformers.modeling_tf_utils.get_initializer.initializer_range",description:"<strong>initializer_range</strong> (<em>float</em>, defaults to 0.02) &#x2014; Standard deviation of the initializer range.",name:"initializer_range"}],returnDescription:` <p>The truncated normal initializer.</p> `,returnType:` <p><code>tf.initializers.TruncatedNormal</code></p> `}}),Pt=new T({props:{name:"transformers.modeling_tf_utils.keras_serializable",anchor:"transformers.modeling_tf_utils.keras_serializable",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L105",parametersDescription:[{anchor:"transformers.modeling_tf_utils.keras_serializable.cls",description:`<strong>cls</strong> (a <code>tf.keras.layers.Layers 
subclass</code>) &#x2014; Typically a <code>TF.MainLayer</code> class in this project, in general must accept a <code>config</code> argument to its initializer.`,name:"cls"}],returnDescription:` <p>The same class object, with modifications for Keras deserialization.</p> `}}),Ct=new T({props:{name:"transformers.shape_list",anchor:"transformers.shape_list",parameters:[{name:"tensor",val:": typing.Union[tensorflow.python.framework.ops.Tensor, numpy.ndarray]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tf_utils.py#L31",parametersDescription:[{anchor:"transformers.shape_list.tensor",description:"<strong>tensor</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code>) &#x2014; The tensor we want the shape of.",name:"tensor"}],returnDescription:` <p>The shape of the tensor as a list.</p> `,returnType:` <p><code>List[int]</code></p> `}}),{c(){p=s("meta"),k=l(),u=s("h1"),y=s("a"),A=s("span"),h(E.$$.fragment),G=l(),O=s("span"),L=a("Custom Layers and Utilities"),j=l(),S=s("p"),P=a("This page lists all the custom layers used by the library, as well as the utility functions it provides for modeling."),Q=l(),F=s("p"),C=a("Most of those are only useful if you are studying the code of the models in the library."),B=l(),x=s("h2"),q=s("a"),w=s("span"),h(I.$$.fragment),fe=l(),K=s("span"),ue=a("Pytorch custom modules"),ie=l(),R=s("div"),h(Ge.$$.fragment),nn=l(),no=s("p"),rn=a("1D-convolutional layer as defined by Radford et al. 
for OpenAI GPT (and also used in GPT-2)."),an=l(),ro=s("p"),ln=a("Basically works like a linear layer but the weights are transposed."),ps=l(),U=s("div"),h(Ke.$$.fragment),dn=l(),ao=s("p"),cn=a("Compute SQuAD start logits from sequence hidden states."),pn=l(),St=s("div"),h(Re.$$.fragment),ms=l(),W=s("div"),h(Ue.$$.fragment),mn=l(),io=s("p"),fn=a("Compute SQuAD end logits from sequence hidden states."),un=l(),Pe=s("div"),h(We.$$.fragment),hn=l(),h(Ce.$$.fragment),fs=l(),X=s("div"),h(Xe.$$.fragment),gn=l(),lo=s("p"),_n=a("Compute SQuAD 2.0 answer class from classification and start tokens hidden states."),vn=l(),ze=s("div"),h(Je.$$.fragment),$n=l(),h(Se.$$.fragment),us=l(),he=s("div"),h(Ye.$$.fragment),bn=l(),Ze=s("p"),yn=a("Base class for outputs of question answering models using a "),Ft=s("a"),wn=a("SQuADHead"),Tn=a("."),hs=l(),J=s("div"),h(et.$$.fragment),kn=l(),co=s("p"),qn=a("A SQuAD head inspired by XLNet."),En=l(),At=s("div"),h(tt.$$.fragment),gs=l(),Y=s("div"),h(ot.$$.fragment),xn=l(),po=s("p"),Dn=a("Compute a single vector summary of a sequence hidden states."),Ln=l(),Fe=s("div"),h(st.$$.fragment),Pn=l(),mo=s("p"),Cn=a("Compute a single vector summary of a sequence hidden states."),_s=l(),ge=s("h2"),Ae=s("a"),fo=s("span"),h(nt.$$.fragment),zn=l(),uo=s("span"),Sn=a("PyTorch Helper Functions"),vs=l(),z=s("div"),h(rt.$$.fragment),Fn=l(),N=s("p"),An=a("This function chunks the "),ho=s("code"),On=a("input_tensors"),In=a(" into smaller input tensor parts of size "),go=s("code"),Nn=a("chunk_size"),Hn=a(` over the dimension `),_o=s("code"),Mn=a("chunk_dim"),Vn=a(". 
It then applies a layer "),vo=s("code"),jn=a("forward_fn"),Qn=a(" to each chunk independently to save memory."),Bn=l(),H=s("p"),Gn=a("If the "),$o=s("code"),Kn=a("forward_fn"),Rn=a(" is independent across the "),bo=s("code"),Un=a("chunk_dim"),Wn=a(` this function will yield the same result as directly applying `),yo=s("code"),Xn=a("forward_fn"),Jn=a(" to "),wo=s("code"),Yn=a("input_tensors"),Zn=a("."),er=l(),To=s("p"),tr=a("Examples:"),or=l(),h(at.$$.fragment),$s=l(),_e=s("div"),h(it.$$.fragment),sr=l(),lt=s("p"),nr=a("Finds the heads and their indices taking "),ko=s("code"),rr=a("already_pruned_heads"),ar=a(" into account."),bs=l(),Z=s("div"),h(dt.$$.fragment),ir=l(),qo=s("p"),lr=a("Prune a Conv1D or linear layer to keep only entries in index."),dr=l(),Eo=s("p"),cr=a("Used to remove heads."),ys=l(),ee=s("div"),h(ct.$$.fragment),pr=l(),xo=s("p"),mr=a(`Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed.`),fr=l(),Do=s("p"),ur=a("Used to remove heads."),ws=l(),te=s("div"),h(pt.$$.fragment),hr=l(),Lo=s("p"),gr=a("Prune a linear layer to keep only entries in index."),_r=l(),Po=s("p"),vr=a("Used to remove heads."),Ts=l(),ve=s("h2"),Oe=s("a"),Co=s("span"),h(mt.$$.fragment),$r=l(),zo=s("span"),br=a("TensorFlow custom layers"),ks=l(),oe=s("div"),h(ft.$$.fragment),yr=l(),So=s("p"),wr=a("1D-convolutional layer as defined by Radford et al. 
for OpenAI GPT (and also used in GPT-2)."),Tr=l(),Fo=s("p"),kr=a("Basically works like a linear layer but the weights are transposed."),qs=l(),M=s("div"),h(ut.$$.fragment),qr=l(),Ao=s("p"),Er=a("Construct shared token embeddings."),xr=l(),Oo=s("p"),Dr=a(`The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language modeling.`),Lr=l(),le=s("div"),h(ht.$$.fragment),Pr=l(),Io=s("p"),Cr=a("Get token embeddings of inputs or decode final hidden state."),zr=l(),gt=s("p"),Sr=a(`Shared weights logic is adapted from `),_t=s("a"),Fr=a("here"),Ar=a("."),Es=l(),se=s("div"),h(vt.$$.fragment),Or=l(),No=s("p"),Ir=a("Compute a single vector summary of a sequence hidden states."),Nr=l(),de=s("div"),h($t.$$.fragment),Hr=l(),Ho=s("p"),Mr=a(`Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class.`),Vr=l(),h(Ie.$$.fragment),xs=l(),$e=s("h2"),Ne=s("a"),Mo=s("span"),h(bt.$$.fragment),jr=l(),Vo=s("span"),Qr=a("TensorFlow loss functions"),Ds=l(),ne=s("div"),h(yt.$$.fragment),Br=l(),jo=s("p"),Gr=a("Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token."),Kr=l(),h(He.$$.fragment),Ls=l(),re=s("div"),h(wt.$$.fragment),Rr=l(),Qo=s("p"),Ur=a("Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens."),Wr=l(),h(Me.$$.fragment),Ps=l(),be=s("div"),h(Tt.$$.fragment),Xr=l(),Bo=s("p"),Jr=a("Loss function suitable for multiple choice tasks."),Cs=l(),ye=s("div"),h(kt.$$.fragment),Yr=l(),Go=s("p"),Zr=a("Loss function suitable for question answering."),zs=l(),we=s("div"),h(qt.$$.fragment),ea=l(),Ko=s("p"),ta=a("Loss function suitable for sequence classification."),Ss=l(),ae=s("div"),h(Et.$$.fragment),oa=l(),Ro=s("p"),sa=a("Loss function suitable for token 
classification."),na=l(),h(Ve.$$.fragment),Fs=l(),Te=s("h2"),je=s("a"),Uo=s("span"),h(xt.$$.fragment),ra=l(),Wo=s("span"),aa=a("TensorFlow Helper Functions"),As=l(),ke=s("div"),h(Dt.$$.fragment),ia=l(),Lt=s("p"),la=a("Creates a "),Xo=s("code"),da=a("tf.initializers.TruncatedNormal"),ca=a(" with the given range."),Os=l(),V=s("div"),h(Pt.$$.fragment),pa=l(),Jo=s("p"),ma=a("Decorate a Keras Layer class to support Keras serialization."),fa=l(),Yo=s("p"),ua=a("This is done by:"),ha=l(),qe=s("ol"),Ee=s("li"),ga=a("Adding a "),Zo=s("code"),_a=a("transformers_config"),va=a(" dict to the Keras config dictionary in "),es=s("code"),$a=a("get_config"),ba=a(` (called by Keras at serialization time.`),ya=l(),xe=s("li"),wa=a("Wrapping "),ts=s("code"),Ta=a("__init__"),ka=a(" to accept that "),os=s("code"),qa=a("transformers_config"),Ea=a(` dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer.`),xa=l(),De=s("li"),Da=a(`Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in `),ss=s("code"),La=a("custom_objects"),Pa=a(" in the call to "),ns=s("code"),Ca=a("tf.keras.models.load_model"),za=a("."),Is=l(),Le=s("div"),h(Ct.$$.fragment),Sa=l(),rs=s("p"),Fa=a("Deal with dynamic shape in tensorflow cleanly."),this.h()},l(e){const c=Xi('[data-svelte="svelte-1phssyn"]',document.head);p=n(c,"META",{name:!0,content:!0}),c.forEach(o),k=d(e),u=n(e,"H1",{class:!0});var zt=r(u);y=n(zt,"A",{id:!0,class:!0,href:!0});var as=r(y);A=n(as,"SPAN",{});var is=r(A);g(E.$$.fragment,is),is.forEach(o),as.forEach(o),G=d(zt),O=n(zt,"SPAN",{});var ls=r(O);L=i(ls,"Custom Layers and Utilities"),ls.forEach(o),zt.forEach(o),j=d(e),S=n(e,"P",{});var ds=r(S);P=i(ds,"This page lists all the custom layers used by the library, as well as the utility functions it provides for modeling."),ds.forEach(o),Q=d(e),F=n(e,"P",{});var cs=r(F);C=i(cs,"Most of those are only useful if you are 
studying the code of the models in the library."),cs.forEach(o),B=d(e),x=n(e,"H2",{class:!0});var Hs=r(x);q=n(Hs,"A",{id:!0,class:!0,href:!0});var Aa=r(q);w=n(Aa,"SPAN",{});var Oa=r(w);g(I.$$.fragment,Oa),Oa.forEach(o),Aa.forEach(o),fe=d(Hs),K=n(Hs,"SPAN",{});var Ia=r(K);ue=i(Ia,"Pytorch custom modules"),Ia.forEach(o),Hs.forEach(o),ie=d(e),R=n(e,"DIV",{class:!0});var Ot=r(R);g(Ge.$$.fragment,Ot),nn=d(Ot),no=n(Ot,"P",{});var Na=r(no);rn=i(Na,"1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)."),Na.forEach(o),an=d(Ot),ro=n(Ot,"P",{});var Ha=r(ro);ln=i(Ha,"Basically works like a linear layer but the weights are transposed."),Ha.forEach(o),Ot.forEach(o),ps=d(e),U=n(e,"DIV",{class:!0});var It=r(U);g(Ke.$$.fragment,It),dn=d(It),ao=n(It,"P",{});var Ma=r(ao);cn=i(Ma,"Compute SQuAD start logits from sequence hidden states."),Ma.forEach(o),pn=d(It),St=n(It,"DIV",{class:!0});var Va=r(St);g(Re.$$.fragment,Va),Va.forEach(o),It.forEach(o),ms=d(e),W=n(e,"DIV",{class:!0});var Nt=r(W);g(Ue.$$.fragment,Nt),mn=d(Nt),io=n(Nt,"P",{});var ja=r(io);fn=i(ja,"Compute SQuAD end logits from sequence hidden states."),ja.forEach(o),un=d(Nt),Pe=n(Nt,"DIV",{class:!0});var Ms=r(Pe);g(We.$$.fragment,Ms),hn=d(Ms),g(Ce.$$.fragment,Ms),Ms.forEach(o),Nt.forEach(o),fs=d(e),X=n(e,"DIV",{class:!0});var Ht=r(X);g(Xe.$$.fragment,Ht),gn=d(Ht),lo=n(Ht,"P",{});var Qa=r(lo);_n=i(Qa,"Compute SQuAD 2.0 answer class from classification and start tokens hidden states."),Qa.forEach(o),vn=d(Ht),ze=n(Ht,"DIV",{class:!0});var Vs=r(ze);g(Je.$$.fragment,Vs),$n=d(Vs),g(Se.$$.fragment,Vs),Vs.forEach(o),Ht.forEach(o),us=d(e),he=n(e,"DIV",{class:!0});var js=r(he);g(Ye.$$.fragment,js),bn=d(js),Ze=n(js,"P",{});var Qs=r(Ze);yn=i(Qs,"Base class for outputs of question answering models using a "),Ft=n(Qs,"A",{href:!0});var Ba=r(Ft);wn=i(Ba,"SQuADHead"),Ba.forEach(o),Tn=i(Qs,"."),Qs.forEach(o),js.forEach(o),hs=d(e),J=n(e,"DIV",{class:!0});var 
Mt=r(J);g(et.$$.fragment,Mt),kn=d(Mt),co=n(Mt,"P",{});var Ga=r(co);qn=i(Ga,"A SQuAD head inspired by XLNet."),Ga.forEach(o),En=d(Mt),At=n(Mt,"DIV",{class:!0});var Ka=r(At);g(tt.$$.fragment,Ka),Ka.forEach(o),Mt.forEach(o),gs=d(e),Y=n(e,"DIV",{class:!0});var Vt=r(Y);g(ot.$$.fragment,Vt),xn=d(Vt),po=n(Vt,"P",{});var Ra=r(po);Dn=i(Ra,"Compute a single vector summary of a sequence hidden states."),Ra.forEach(o),Ln=d(Vt),Fe=n(Vt,"DIV",{class:!0});var Bs=r(Fe);g(st.$$.fragment,Bs),Pn=d(Bs),mo=n(Bs,"P",{});var Ua=r(mo);Cn=i(Ua,"Compute a single vector summary of a sequence hidden states."),Ua.forEach(o),Bs.forEach(o),Vt.forEach(o),_s=d(e),ge=n(e,"H2",{class:!0});var Gs=r(ge);Ae=n(Gs,"A",{id:!0,class:!0,href:!0});var Wa=r(Ae);fo=n(Wa,"SPAN",{});var Xa=r(fo);g(nt.$$.fragment,Xa),Xa.forEach(o),Wa.forEach(o),zn=d(Gs),uo=n(Gs,"SPAN",{});var Ja=r(uo);Sn=i(Ja,"PyTorch Helper Functions"),Ja.forEach(o),Gs.forEach(o),vs=d(e),z=n(e,"DIV",{class:!0});var ce=r(z);g(rt.$$.fragment,ce),Fn=d(ce),N=n(ce,"P",{});var pe=r(N);An=i(pe,"This function chunks the "),ho=n(pe,"CODE",{});var Ya=r(ho);On=i(Ya,"input_tensors"),Ya.forEach(o),In=i(pe," into smaller input tensor parts of size "),go=n(pe,"CODE",{});var Za=r(go);Nn=i(Za,"chunk_size"),Za.forEach(o),Hn=i(pe,` over the dimension `),_o=n(pe,"CODE",{});var ei=r(_o);Mn=i(ei,"chunk_dim"),ei.forEach(o),Vn=i(pe,". 
It then applies a layer "),vo=n(pe,"CODE",{});var ti=r(vo);jn=i(ti,"forward_fn"),ti.forEach(o),Qn=i(pe," to each chunk independently to save memory."),pe.forEach(o),Bn=d(ce),H=n(ce,"P",{});var me=r(H);Gn=i(me,"If the "),$o=n(me,"CODE",{});var oi=r($o);Kn=i(oi,"forward_fn"),oi.forEach(o),Rn=i(me," is independent across the "),bo=n(me,"CODE",{});var si=r(bo);Un=i(si,"chunk_dim"),si.forEach(o),Wn=i(me,` this function will yield the same result as directly applying `),yo=n(me,"CODE",{});var ni=r(yo);Xn=i(ni,"forward_fn"),ni.forEach(o),Jn=i(me," to "),wo=n(me,"CODE",{});var ri=r(wo);Yn=i(ri,"input_tensors"),ri.forEach(o),Zn=i(me,"."),me.forEach(o),er=d(ce),To=n(ce,"P",{});var ai=r(To);tr=i(ai,"Examples:"),ai.forEach(o),or=d(ce),g(at.$$.fragment,ce),ce.forEach(o),$s=d(e),_e=n(e,"DIV",{class:!0});var Ks=r(_e);g(it.$$.fragment,Ks),sr=d(Ks),lt=n(Ks,"P",{});var Rs=r(lt);nr=i(Rs,"Finds the heads and their indices taking "),ko=n(Rs,"CODE",{});var ii=r(ko);rr=i(ii,"already_pruned_heads"),ii.forEach(o),ar=i(Rs," into account."),Rs.forEach(o),Ks.forEach(o),bs=d(e),Z=n(e,"DIV",{class:!0});var jt=r(Z);g(dt.$$.fragment,jt),ir=d(jt),qo=n(jt,"P",{});var li=r(qo);lr=i(li,"Prune a Conv1D or linear layer to keep only entries in index."),li.forEach(o),dr=d(jt),Eo=n(jt,"P",{});var di=r(Eo);cr=i(di,"Used to remove heads."),di.forEach(o),jt.forEach(o),ys=d(e),ee=n(e,"DIV",{class:!0});var Qt=r(ee);g(ct.$$.fragment,Qt),pr=d(Qt),xo=n(Qt,"P",{});var ci=r(xo);mr=i(ci,`Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. 
BERT) but the weights are transposed.`),ci.forEach(o),fr=d(Qt),Do=n(Qt,"P",{});var pi=r(Do);ur=i(pi,"Used to remove heads."),pi.forEach(o),Qt.forEach(o),ws=d(e),te=n(e,"DIV",{class:!0});var Bt=r(te);g(pt.$$.fragment,Bt),hr=d(Bt),Lo=n(Bt,"P",{});var mi=r(Lo);gr=i(mi,"Prune a linear layer to keep only entries in index."),mi.forEach(o),_r=d(Bt),Po=n(Bt,"P",{});var fi=r(Po);vr=i(fi,"Used to remove heads."),fi.forEach(o),Bt.forEach(o),Ts=d(e),ve=n(e,"H2",{class:!0});var Us=r(ve);Oe=n(Us,"A",{id:!0,class:!0,href:!0});var ui=r(Oe);Co=n(ui,"SPAN",{});var hi=r(Co);g(mt.$$.fragment,hi),hi.forEach(o),ui.forEach(o),$r=d(Us),zo=n(Us,"SPAN",{});var gi=r(zo);br=i(gi,"TensorFlow custom layers"),gi.forEach(o),Us.forEach(o),ks=d(e),oe=n(e,"DIV",{class:!0});var Gt=r(oe);g(ft.$$.fragment,Gt),yr=d(Gt),So=n(Gt,"P",{});var _i=r(So);wr=i(_i,"1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)."),_i.forEach(o),Tr=d(Gt),Fo=n(Gt,"P",{});var vi=r(Fo);kr=i(vi,"Basically works like a linear layer but the weights are transposed."),vi.forEach(o),Gt.forEach(o),qs=d(e),M=n(e,"DIV",{class:!0});var Qe=r(M);g(ut.$$.fragment,Qe),qr=d(Qe),Ao=n(Qe,"P",{});var $i=r(Ao);Er=i($i,"Construct shared token embeddings."),$i.forEach(o),xr=d(Qe),Oo=n(Qe,"P",{});var bi=r(Oo);Dr=i(bi,`The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language modeling.`),bi.forEach(o),Lr=d(Qe),le=n(Qe,"DIV",{class:!0});var Kt=r(le);g(ht.$$.fragment,Kt),Pr=d(Kt),Io=n(Kt,"P",{});var yi=r(Io);Cr=i(yi,"Get token embeddings of inputs or decode final hidden state."),yi.forEach(o),zr=d(Kt),gt=n(Kt,"P",{});var Ws=r(gt);Sr=i(Ws,`Shared weights logic is adapted from `),_t=n(Ws,"A",{href:!0,rel:!0});var wi=r(_t);Fr=i(wi,"here"),wi.forEach(o),Ar=i(Ws,"."),Ws.forEach(o),Kt.forEach(o),Qe.forEach(o),Es=d(e),se=n(e,"DIV",{class:!0});var Rt=r(se);g(vt.$$.fragment,Rt),Or=d(Rt),No=n(Rt,"P",{});var Ti=r(No);Ir=i(Ti,"Compute a single vector summary of a sequence 
hidden states."),Ti.forEach(o),Nr=d(Rt),de=n(Rt,"DIV",{class:!0});var Ut=r(de);g($t.$$.fragment,Ut),Hr=d(Ut),Ho=n(Ut,"P",{});var ki=r(Ho);Mr=i(ki,`Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class.`),ki.forEach(o),Vr=d(Ut),g(Ie.$$.fragment,Ut),Ut.forEach(o),Rt.forEach(o),xs=d(e),$e=n(e,"H2",{class:!0});var Xs=r($e);Ne=n(Xs,"A",{id:!0,class:!0,href:!0});var qi=r(Ne);Mo=n(qi,"SPAN",{});var Ei=r(Mo);g(bt.$$.fragment,Ei),Ei.forEach(o),qi.forEach(o),jr=d(Xs),Vo=n(Xs,"SPAN",{});var xi=r(Vo);Qr=i(xi,"TensorFlow loss functions"),xi.forEach(o),Xs.forEach(o),Ds=d(e),ne=n(e,"DIV",{class:!0});var Wt=r(ne);g(yt.$$.fragment,Wt),Br=d(Wt),jo=n(Wt,"P",{});var Di=r(jo);Gr=i(Di,"Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token."),Di.forEach(o),Kr=d(Wt),g(He.$$.fragment,Wt),Wt.forEach(o),Ls=d(e),re=n(e,"DIV",{class:!0});var Xt=r(re);g(wt.$$.fragment,Xt),Rr=d(Xt),Qo=n(Xt,"P",{});var Li=r(Qo);Ur=i(Li,"Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens."),Li.forEach(o),Wr=d(Xt),g(Me.$$.fragment,Xt),Xt.forEach(o),Ps=d(e),be=n(e,"DIV",{class:!0});var Js=r(be);g(Tt.$$.fragment,Js),Xr=d(Js),Bo=n(Js,"P",{});var Pi=r(Bo);Jr=i(Pi,"Loss function suitable for multiple choice tasks."),Pi.forEach(o),Js.forEach(o),Cs=d(e),ye=n(e,"DIV",{class:!0});var Ys=r(ye);g(kt.$$.fragment,Ys),Yr=d(Ys),Go=n(Ys,"P",{});var Ci=r(Go);Zr=i(Ci,"Loss function suitable for question answering."),Ci.forEach(o),Ys.forEach(o),zs=d(e),we=n(e,"DIV",{class:!0});var Zs=r(we);g(qt.$$.fragment,Zs),ea=d(Zs),Ko=n(Zs,"P",{});var zi=r(Ko);ta=i(zi,"Loss function suitable for sequence classification."),zi.forEach(o),Zs.forEach(o),Ss=d(e),ae=n(e,"DIV",{class:!0});var Jt=r(ae);g(Et.$$.fragment,Jt),oa=d(Jt),Ro=n(Jt,"P",{});var Si=r(Ro);sa=i(Si,"Loss function suitable for token 
classification."),Si.forEach(o),na=d(Jt),g(Ve.$$.fragment,Jt),Jt.forEach(o),Fs=d(e),Te=n(e,"H2",{class:!0});var en=r(Te);je=n(en,"A",{id:!0,class:!0,href:!0});var Fi=r(je);Uo=n(Fi,"SPAN",{});var Ai=r(Uo);g(xt.$$.fragment,Ai),Ai.forEach(o),Fi.forEach(o),ra=d(en),Wo=n(en,"SPAN",{});var Oi=r(Wo);aa=i(Oi,"TensorFlow Helper Functions"),Oi.forEach(o),en.forEach(o),As=d(e),ke=n(e,"DIV",{class:!0});var tn=r(ke);g(Dt.$$.fragment,tn),ia=d(tn),Lt=n(tn,"P",{});var on=r(Lt);la=i(on,"Creates a "),Xo=n(on,"CODE",{});var Ii=r(Xo);da=i(Ii,"tf.initializers.TruncatedNormal"),Ii.forEach(o),ca=i(on," with the given range."),on.forEach(o),tn.forEach(o),Os=d(e),V=n(e,"DIV",{class:!0});var Be=r(V);g(Pt.$$.fragment,Be),pa=d(Be),Jo=n(Be,"P",{});var Ni=r(Jo);ma=i(Ni,"Decorate a Keras Layer class to support Keras serialization."),Ni.forEach(o),fa=d(Be),Yo=n(Be,"P",{});var Hi=r(Yo);ua=i(Hi,"This is done by:"),Hi.forEach(o),ha=d(Be),qe=n(Be,"OL",{});var Yt=r(qe);Ee=n(Yt,"LI",{});var Zt=r(Ee);ga=i(Zt,"Adding a "),Zo=n(Zt,"CODE",{});var Mi=r(Zo);_a=i(Mi,"transformers_config"),Mi.forEach(o),va=i(Zt," dict to the Keras config dictionary in "),es=n(Zt,"CODE",{});var Vi=r(es);$a=i(Vi,"get_config"),Vi.forEach(o),ba=i(Zt,` (called by Keras at serialization time.`),Zt.forEach(o),ya=d(Yt),xe=n(Yt,"LI",{});var eo=r(xe);wa=i(eo,"Wrapping "),ts=n(eo,"CODE",{});var ji=r(ts);Ta=i(ji,"__init__"),ji.forEach(o),ka=i(eo," to accept that "),os=n(eo,"CODE",{});var Qi=r(os);qa=i(Qi,"transformers_config"),Qi.forEach(o),Ea=i(eo,` dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer.`),eo.forEach(o),xa=d(Yt),De=n(Yt,"LI",{});var to=r(De);Da=i(to,`Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in `),ss=n(to,"CODE",{});var Bi=r(ss);La=i(Bi,"custom_objects"),Bi.forEach(o),Pa=i(to," in the call to "),ns=n(to,"CODE",{});var 
Gi=r(ns);Ca=i(Gi,"tf.keras.models.load_model"),Gi.forEach(o),za=i(to,"."),to.forEach(o),Yt.forEach(o),Be.forEach(o),Is=d(e),Le=n(e,"DIV",{class:!0});var sn=r(Le);g(Ct.$$.fragment,sn),Sa=d(sn),rs=n(sn,"P",{});var Ki=r(rs);Fa=i(Ki,"Deal with dynamic shape in tensorflow cleanly."),Ki.forEach(o),sn.forEach(o),this.h()},h(){f(p,"name","hf:doc:metadata"),f(p,"content",JSON.stringify(rl)),f(y,"id","custom-layers-and-utilities"),f(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(y,"href","#custom-layers-and-utilities"),f(u,"class","relative group"),f(q,"id","transformers.Conv1D"),f(q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(q,"href","#transformers.Conv1D"),f(x,"class","relative group"),f(R,"class","docstring"),f(St,"class","docstring"),f(U,"class","docstring"),f(Pe,"class","docstring"),f(W,"class","docstring"),f(ze,"class","docstring"),f(X,"class","docstring"),f(Ft,"href","/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.modeling_utils.SQuADHead"),f(he,"class","docstring"),f(At,"class","docstring"),f(J,"class","docstring"),f(Fe,"class","docstring"),f(Y,"class","docstring"),f(Ae,"id","transformers.apply_chunking_to_forward"),f(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ae,"href","#transformers.apply_chunking_to_forward"),f(ge,"class","relative group"),f(z,"class","docstring"),f(_e,"class","docstring"),f(Z,"class","docstring"),f(ee,"class","docstring"),f(te,"class","docstring"),f(Oe,"id","transformers.modeling_tf_utils.TFConv1D"),f(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),f(Oe,"href","#transformers.modeling_tf_utils.TFConv1D"),f(ve,"class","relative group"),f(oe,"class","docstring"),f(_t,"href","https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24"),f(_t,"rel","nofollow"),f(le,"class","docstring"),f(M,"class","docstring"),f(de,"class","docstring"),f(se,"class","docstring"),f(Ne,"id","transformers.modeling_tf_utils.TFCausalLanguageModelingLoss"),f(Ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(Ne,"href","#transformers.modeling_tf_utils.TFCausalLanguageModelingLoss"),f($e,"class","relative group"),f(ne,"class","docstring"),f(re,"class","docstring"),f(be,"class","docstring"),f(ye,"class","docstring"),f(we,"class","docstring"),f(ae,"class","docstring"),f(je,"id","transformers.modeling_tf_utils.get_initializer"),f(je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(je,"href","#transformers.modeling_tf_utils.get_initializer"),f(Te,"class","relative 
group"),f(ke,"class","docstring"),f(V,"class","docstring"),f(Le,"class","docstring")},m(e,c){t(document.head,p),m(e,k,c),m(e,u,c),t(u,y),t(y,A),_(E,A,null),t(u,G),t(u,O),t(O,L),m(e,j,c),m(e,S,c),t(S,P),m(e,Q,c),m(e,F,c),t(F,C),m(e,B,c),m(e,x,c),t(x,q),t(q,w),_(I,w,null),t(x,fe),t(x,K),t(K,ue),m(e,ie,c),m(e,R,c),_(Ge,R,null),t(R,nn),t(R,no),t(no,rn),t(R,an),t(R,ro),t(ro,ln),m(e,ps,c),m(e,U,c),_(Ke,U,null),t(U,dn),t(U,ao),t(ao,cn),t(U,pn),t(U,St),_(Re,St,null),m(e,ms,c),m(e,W,c),_(Ue,W,null),t(W,mn),t(W,io),t(io,fn),t(W,un),t(W,Pe),_(We,Pe,null),t(Pe,hn),_(Ce,Pe,null),m(e,fs,c),m(e,X,c),_(Xe,X,null),t(X,gn),t(X,lo),t(lo,_n),t(X,vn),t(X,ze),_(Je,ze,null),t(ze,$n),_(Se,ze,null),m(e,us,c),m(e,he,c),_(Ye,he,null),t(he,bn),t(he,Ze),t(Ze,yn),t(Ze,Ft),t(Ft,wn),t(Ze,Tn),m(e,hs,c),m(e,J,c),_(et,J,null),t(J,kn),t(J,co),t(co,qn),t(J,En),t(J,At),_(tt,At,null),m(e,gs,c),m(e,Y,c),_(ot,Y,null),t(Y,xn),t(Y,po),t(po,Dn),t(Y,Ln),t(Y,Fe),_(st,Fe,null),t(Fe,Pn),t(Fe,mo),t(mo,Cn),m(e,_s,c),m(e,ge,c),t(ge,Ae),t(Ae,fo),_(nt,fo,null),t(ge,zn),t(ge,uo),t(uo,Sn),m(e,vs,c),m(e,z,c),_(rt,z,null),t(z,Fn),t(z,N),t(N,An),t(N,ho),t(ho,On),t(N,In),t(N,go),t(go,Nn),t(N,Hn),t(N,_o),t(_o,Mn),t(N,Vn),t(N,vo),t(vo,jn),t(N,Qn),t(z,Bn),t(z,H),t(H,Gn),t(H,$o),t($o,Kn),t(H,Rn),t(H,bo),t(bo,Un),t(H,Wn),t(H,yo),t(yo,Xn),t(H,Jn),t(H,wo),t(wo,Yn),t(H,Zn),t(z,er),t(z,To),t(To,tr),t(z,or),_(at,z,null),m(e,$s,c),m(e,_e,c),_(it,_e,null),t(_e,sr),t(_e,lt),t(lt,nr),t(lt,ko),t(ko,rr),t(lt,ar),m(e,bs,c),m(e,Z,c),_(dt,Z,null),t(Z,ir),t(Z,qo),t(qo,lr),t(Z,dr),t(Z,Eo),t(Eo,cr),m(e,ys,c),m(e,ee,c),_(ct,ee,null),t(ee,pr),t(ee,xo),t(xo,mr),t(ee,fr),t(ee,Do),t(Do,ur),m(e,ws,c),m(e,te,c),_(pt,te,null),t(te,hr),t(te,Lo),t(Lo,gr),t(te,_r),t(te,Po),t(Po,vr),m(e,Ts,c),m(e,ve,c),t(ve,Oe),t(Oe,Co),_(mt,Co,null),t(ve,$r),t(ve,zo),t(zo,br),m(e,ks,c),m(e,oe,c),_(ft,oe,null),t(oe,yr),t(oe,So),t(So,wr),t(oe,Tr),t(oe,Fo),t(Fo,kr),m(e,qs,c),m(e,M,c),_(ut,M,null),t(M,qr),t(M,Ao),t(Ao,Er),t(M,xr),t(M,Oo),t(Oo,Dr),t(M,Lr),t(M,le),_(ht,le,null),
t(le,Pr),t(le,Io),t(Io,Cr),t(le,zr),t(le,gt),t(gt,Sr),t(gt,_t),t(_t,Fr),t(gt,Ar),m(e,Es,c),m(e,se,c),_(vt,se,null),t(se,Or),t(se,No),t(No,Ir),t(se,Nr),t(se,de),_($t,de,null),t(de,Hr),t(de,Ho),t(Ho,Mr),t(de,Vr),_(Ie,de,null),m(e,xs,c),m(e,$e,c),t($e,Ne),t(Ne,Mo),_(bt,Mo,null),t($e,jr),t($e,Vo),t(Vo,Qr),m(e,Ds,c),m(e,ne,c),_(yt,ne,null),t(ne,Br),t(ne,jo),t(jo,Gr),t(ne,Kr),_(He,ne,null),m(e,Ls,c),m(e,re,c),_(wt,re,null),t(re,Rr),t(re,Qo),t(Qo,Ur),t(re,Wr),_(Me,re,null),m(e,Ps,c),m(e,be,c),_(Tt,be,null),t(be,Xr),t(be,Bo),t(Bo,Jr),m(e,Cs,c),m(e,ye,c),_(kt,ye,null),t(ye,Yr),t(ye,Go),t(Go,Zr),m(e,zs,c),m(e,we,c),_(qt,we,null),t(we,ea),t(we,Ko),t(Ko,ta),m(e,Ss,c),m(e,ae,c),_(Et,ae,null),t(ae,oa),t(ae,Ro),t(Ro,sa),t(ae,na),_(Ve,ae,null),m(e,Fs,c),m(e,Te,c),t(Te,je),t(je,Uo),_(xt,Uo,null),t(Te,ra),t(Te,Wo),t(Wo,aa),m(e,As,c),m(e,ke,c),_(Dt,ke,null),t(ke,ia),t(ke,Lt),t(Lt,la),t(Lt,Xo),t(Xo,da),t(Lt,ca),m(e,Os,c),m(e,V,c),_(Pt,V,null),t(V,pa),t(V,Jo),t(Jo,ma),t(V,fa),t(V,Yo),t(Yo,ua),t(V,ha),t(V,qe),t(qe,Ee),t(Ee,ga),t(Ee,Zo),t(Zo,_a),t(Ee,va),t(Ee,es),t(es,$a),t(Ee,ba),t(qe,ya),t(qe,xe),t(xe,wa),t(xe,ts),t(ts,Ta),t(xe,ka),t(xe,os),t(os,qa),t(xe,Ea),t(qe,xa),t(qe,De),t(De,Da),t(De,ss),t(ss,La),t(De,Pa),t(De,ns),t(ns,Ca),t(De,za),m(e,Is,c),m(e,Le,c),_(Ct,Le,null),t(Le,Sa),t(Le,rs),t(rs,Fa),Ns=!0},p(e,[c]){const zt={};c&2&&(zt.$$scope={dirty:c,ctx:e}),Ce.$set(zt);const as={};c&2&&(as.$$scope={dirty:c,ctx:e}),Se.$set(as);const is={};c&2&&(is.$$scope={dirty:c,ctx:e}),Ie.$set(is);const ls={};c&2&&(ls.$$scope={dirty:c,ctx:e}),He.$set(ls);const ds={};c&2&&(ds.$$scope={dirty:c,ctx:e}),Me.$set(ds);const 
cs={};c&2&&(cs.$$scope={dirty:c,ctx:e}),Ve.$set(cs)},i(e){Ns||(v(E.$$.fragment,e),v(I.$$.fragment,e),v(Ge.$$.fragment,e),v(Ke.$$.fragment,e),v(Re.$$.fragment,e),v(Ue.$$.fragment,e),v(We.$$.fragment,e),v(Ce.$$.fragment,e),v(Xe.$$.fragment,e),v(Je.$$.fragment,e),v(Se.$$.fragment,e),v(Ye.$$.fragment,e),v(et.$$.fragment,e),v(tt.$$.fragment,e),v(ot.$$.fragment,e),v(st.$$.fragment,e),v(nt.$$.fragment,e),v(rt.$$.fragment,e),v(at.$$.fragment,e),v(it.$$.fragment,e),v(dt.$$.fragment,e),v(ct.$$.fragment,e),v(pt.$$.fragment,e),v(mt.$$.fragment,e),v(ft.$$.fragment,e),v(ut.$$.fragment,e),v(ht.$$.fragment,e),v(vt.$$.fragment,e),v($t.$$.fragment,e),v(Ie.$$.fragment,e),v(bt.$$.fragment,e),v(yt.$$.fragment,e),v(He.$$.fragment,e),v(wt.$$.fragment,e),v(Me.$$.fragment,e),v(Tt.$$.fragment,e),v(kt.$$.fragment,e),v(qt.$$.fragment,e),v(Et.$$.fragment,e),v(Ve.$$.fragment,e),v(xt.$$.fragment,e),v(Dt.$$.fragment,e),v(Pt.$$.fragment,e),v(Ct.$$.fragment,e),Ns=!0)},o(e){$(E.$$.fragment,e),$(I.$$.fragment,e),$(Ge.$$.fragment,e),$(Ke.$$.fragment,e),$(Re.$$.fragment,e),$(Ue.$$.fragment,e),$(We.$$.fragment,e),$(Ce.$$.fragment,e),$(Xe.$$.fragment,e),$(Je.$$.fragment,e),$(Se.$$.fragment,e),$(Ye.$$.fragment,e),$(et.$$.fragment,e),$(tt.$$.fragment,e),$(ot.$$.fragment,e),$(st.$$.fragment,e),$(nt.$$.fragment,e),$(rt.$$.fragment,e),$(at.$$.fragment,e),$(it.$$.fragment,e),$(dt.$$.fragment,e),$(ct.$$.fragment,e),$(pt.$$.fragment,e),$(mt.$$.fragment,e),$(ft.$$.fragment,e),$(ut.$$.fragment,e),$(ht.$$.fragment,e),$(vt.$$.fragment,e),$($t.$$.fragment,e),$(Ie.$$.fragment,e),$(bt.$$.fragment,e),$(yt.$$.fragment,e),$(He.$$.fragment,e),$(wt.$$.fragment,e),$(Me.$$.fragment,e),$(Tt.$$.fragment,e),$(kt.$$.fragment,e),$(qt.$$.fragment,e),$(Et.$$.fragment,e),$(Ve.$$.fragment,e),$(xt.$$.fragment,e),$(Dt.$$.fragment,e),$(Pt.$$.fragment,e),$(Ct.$$.fragment,e),Ns=!1},d(e){o(p),e&&o(k),e&&o(u),b(E),e&&o(j),e&&o(S),e&&o(Q),e&&o(F),e&&o(B),e&&o(x),b(I),e&&o(ie),e&&o(R),b(Ge),e&&o(ps),e&&o(U),b(Ke),b(Re),e&&o(ms),e&&o(W),b(Ue),b(
We),b(Ce),e&&o(fs),e&&o(X),b(Xe),b(Je),b(Se),e&&o(us),e&&o(he),b(Ye),e&&o(hs),e&&o(J),b(et),b(tt),e&&o(gs),e&&o(Y),b(ot),b(st),e&&o(_s),e&&o(ge),b(nt),e&&o(vs),e&&o(z),b(rt),b(at),e&&o($s),e&&o(_e),b(it),e&&o(bs),e&&o(Z),b(dt),e&&o(ys),e&&o(ee),b(ct),e&&o(ws),e&&o(te),b(pt),e&&o(Ts),e&&o(ve),b(mt),e&&o(ks),e&&o(oe),b(ft),e&&o(qs),e&&o(M),b(ut),b(ht),e&&o(Es),e&&o(se),b(vt),b($t),b(Ie),e&&o(xs),e&&o($e),b(bt),e&&o(Ds),e&&o(ne),b(yt),b(He),e&&o(Ls),e&&o(re),b(wt),b(Me),e&&o(Ps),e&&o(be),b(Tt),e&&o(Cs),e&&o(ye),b(kt),e&&o(zs),e&&o(we),b(qt),e&&o(Ss),e&&o(ae),b(Et),b(Ve),e&&o(Fs),e&&o(Te),b(xt),e&&o(As),e&&o(ke),b(Dt),e&&o(Os),e&&o(V),b(Pt),e&&o(Is),e&&o(Le),b(Ct)}}}const rl={local:"custom-layers-and-utilities",sections:[{local:"transformers.Conv1D",title:"Pytorch custom modules"},{local:"transformers.apply_chunking_to_forward",title:"PyTorch Helper Functions"},{local:"transformers.modeling_tf_utils.TFConv1D",title:"TensorFlow custom layers"},{local:"transformers.modeling_tf_utils.TFCausalLanguageModelingLoss",title:"TensorFlow loss functions"},{local:"transformers.modeling_tf_utils.get_initializer",title:"TensorFlow Helper Functions"}],title:"Custom Layers and Utilities"};function al(D,p,k){let{fw:u}=p;return D.$$set=y=>{"fw"in y&&k(0,u=y.fw)},[u]}class fl extends Ri{constructor(p){super();Ui(this,p,al,nl,Wi,{fw:0})}}export{fl as default,rl as metadata};
421
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/internal/tokenization_utils.mdx-eeb10b9c.js
import{S as Km,i as Qm,s as Zm,e as r,k as d,w as u,t as o,M as eh,c as s,d as t,m as c,a,x as f,h as n,b as l,F as e,g as $,y as _,q as g,o as k,B as b}from"../../chunks/vendor-4833417e.js";import{T as Pr}from"../../chunks/Tip-fffd6df1.js";import{D as z}from"../../chunks/Docstring-4f315ed9.js";import{C as _s}from"../../chunks/CodeBlock-6a3d1b46.js";import{I as gs}from"../../chunks/IconCopyLink-4b81c553.js";import"../../chunks/CopyButton-dacfbfaf.js";function th(W){let h,P,v,y,E;return{c(){h=r("p"),P=o("This method is deprecated, "),v=r("code"),y=o("__call__"),E=o(" should be used instead.")},l(w){h=s(w,"P",{});var x=a(h);P=n(x,"This method is deprecated, "),v=s(x,"CODE",{});var B=a(v);y=n(B,"__call__"),B.forEach(t),E=n(x," should be used instead."),x.forEach(t)},m(w,x){$(w,h,x),e(h,P),e(h,v),e(v,y),e(h,E)},d(w){w&&t(h)}}}function oh(W){let h,P,v,y,E;return{c(){h=r("p"),P=o("This method is deprecated, "),v=r("code"),y=o("__call__"),E=o(" should be used instead.")},l(w){h=s(w,"P",{});var x=a(h);P=n(x,"This method is deprecated, "),v=s(x,"CODE",{});var B=a(v);y=n(B,"__call__"),B.forEach(t),E=n(x," should be used instead."),x.forEach(t)},m(w,x){$(w,h,x),e(h,P),e(h,v),e(v,y),e(h,E)},d(w){w&&t(h)}}}function nh(W){let h,P,v,y,E;return{c(){h=r("p"),P=o("Passing "),v=r("code"),y=o("use_auth_token=True"),E=o(" is required when you want to use a private model.")},l(w){h=s(w,"P",{});var x=a(h);P=n(x,"Passing "),v=s(x,"CODE",{});var B=a(v);y=n(B,"use_auth_token=True"),B.forEach(t),E=n(x," is required when you want to use a private model."),x.forEach(t)},m(w,x){$(w,h,x),e(h,P),e(h,v),e(v,y),e(h,E)},d(w){w&&t(h)}}}function rh(W){let h,P,v,y,E,w,x,B;return{c(){h=r("p"),P=o("If the "),v=r("code"),y=o("encoded_inputs"),E=o(` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with `),w=r("code"),x=o("return_tensors"),B=o(`. 
In the case of PyTorch tensors, you will lose the specific device of your tensors however.`)},l(ge){h=s(ge,"P",{});var j=a(h);P=n(j,"If the "),v=s(j,"CODE",{});var D=a(v);y=n(D,"encoded_inputs"),D.forEach(t),E=n(j,` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with `),w=s(j,"CODE",{});var ro=a(w);x=n(ro,"return_tensors"),ro.forEach(t),B=n(j,`. In the case of PyTorch tensors, you will lose the specific device of your tensors however.`),j.forEach(t)},m(ge,j){$(ge,h,j),e(h,P),e(h,v),e(v,y),e(h,E),e(h,w),e(w,x),e(h,B)},d(ge){ge&&t(h)}}}function sh(W){let h,P;return{c(){h=r("p"),P=o("This API is experimental and may have some slight breaking changes in the next releases.")},l(v){h=s(v,"P",{});var y=a(h);P=n(y,"This API is experimental and may have some slight breaking changes in the next releases."),y.forEach(t)},m(v,y){$(v,h,y),e(h,P)},d(v){v&&t(h)}}}function ah(W){let h,P,v,y,E,w,x,B,ge,j,D,ro,so,ks,bs,ao,vs,Ts,io,ys,ws,co,zs,xs,qr,lo,$s,Er,ke,De,So,ot,Ps,Co,qs,Br,p,nt,Es,be,Bs,po,Ls,Ds,mo,Is,Ns,As,Oo,Fs,Ss,Wo,Cs,Os,I,te,jo,Ws,js,Mo,Ms,Rs,Ro,Us,Vs,Hs,F,Uo,Gs,Xs,Vo,Ys,Js,Ho,Ks,Qs,Go,Zs,ea,Xo,ta,oa,na,V,Yo,ra,sa,Jo,aa,ia,Ko,da,ca,Qo,la,pa,ma,S,Zo,ha,ua,en,fa,_a,tn,ga,ka,on,ba,va,ho,Ta,ya,wa,Ie,nn,za,xa,rn,$a,Pa,qa,H,sn,Ea,Ba,an,La,Da,dn,Ia,Na,cn,Aa,Fa,Sa,G,ln,Ca,Oa,pn,Wa,ja,mn,Ma,Ra,hn,Ua,Va,Ha,Ne,rt,Ga,un,Xa,Ya,Ae,st,Ja,fn,Ka,Qa,Fe,at,Za,_n,ei,ti,oe,it,oi,gn,ni,ri,Se,si,ne,dt,ai,kn,ii,di,bn,ci,li,Ce,ct,pi,vn,mi,hi,Oe,lt,ui,pt,fi,Tn,_i,gi,ki,re,mt,bi,uo,vi,fo,Ti,yi,yn,wi,zi,se,ht,xi,wn,$i,Pi,ut,qi,zn,Ei,Bi,Li,ae,ft,Di,xn,Ii,Ni,_t,Ai,$n,Fi,Si,Ci,ie,gt,Oi,Pn,Wi,ji,We,Mi,C,kt,Ri,bt,Ui,_o,Vi,Hi,Gi,je,Xi,qn,Yi,Ji,vt,Ki,Me,Tt,Qi,ve,Zi,En,ed,td,Bn,od,nd,rd,de,yt,sd,Ln,ad,id,ce,Dn,dd,cd,In,ld,pd,Nn,md,hd,ud,X,wt,fd,An,_d,gd,Q,kd,Fn,bd,vd,Sn,Td,yd,Cn,wd,zd,xd,Re,$d,Ue,zt,Pd,M,qd,On,Ed,Bd,Wn,Ld,Dd,jn,Id,Nd,Mn,Ad,Fd,Sd,Ve,xt,Cd,Rn,Od,Wd,Y,$t,jd,Pt,Md,Un,Rd,Ud,Vd,Vn,Hd,Gd,qt
,Xd,le,Et,Yd,Bt,Jd,Hn,Kd,Qd,Zd,He,ec,J,Lt,tc,Gn,oc,nc,Dt,rc,Xn,sc,ac,ic,It,dc,Yn,cc,lc,pc,pe,Nt,mc,Jn,hc,uc,At,fc,Kn,_c,gc,kc,Ge,Ft,bc,St,vc,Qn,Tc,yc,wc,Xe,Ct,zc,Zn,xc,Lr,Te,Ye,er,Ot,$c,tr,Pc,Dr,A,Wt,qc,ye,Ec,go,Bc,Lc,ko,Dc,Ic,Nc,q,jt,Ac,or,Fc,Sc,nr,Cc,Oc,Mt,Wc,bo,jc,Mc,Rc,Rt,Uc,rr,Vc,Hc,Gc,Ut,sr,Xc,Yc,Vt,Jc,ar,Kc,Qc,Zc,R,el,vo,tl,ol,ir,nl,rl,dr,sl,al,cr,il,dl,cl,lr,ll,pl,Ht,ml,N,Gt,hl,pr,ul,fl,mr,_l,gl,Xt,kl,To,bl,vl,Tl,hr,yl,wl,Yt,zl,me,Jt,xl,we,$l,ur,Pl,ql,fr,El,Bl,Ll,_r,Dl,Ir,ze,Je,gr,Kt,Il,kr,Nl,Nr,xe,Qt,Al,$e,Fl,br,Sl,Cl,Ke,Ol,vr,Wl,jl,Ml,Ar,Pe,Zt,Rl,Tr,Ul,Fr,qe,eo,Vl,yr,Hl,Sr;return w=new gs({}),ot=new gs({}),nt=new z({props:{name:"class transformers.PreTrainedTokenizerBase",anchor:"transformers.PreTrainedTokenizerBase",parameters:[{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L1433",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.model_max_length",description:`<strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>).`,name:"model_max_length"},{anchor:"transformers.PreTrainedTokenizerBase.padding_side",description:`<strong>padding_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. 
Default value is picked from the class attribute of the same name.`,name:"padding_side"},{anchor:"transformers.PreTrainedTokenizerBase.truncation_side",description:`<strong>truncation_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have truncation applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.`,name:"truncation_side"},{anchor:"transformers.PreTrainedTokenizerBase.model_input_names",description:`<strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). Default value is picked from the class attribute of the same name.`,name:"model_input_names"},{anchor:"transformers.PreTrainedTokenizerBase.bos_token",description:`<strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.`,name:"bos_token"},{anchor:"transformers.PreTrainedTokenizerBase.eos_token",description:`<strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.`,name:"eos_token"},{anchor:"transformers.PreTrainedTokenizerBase.unk_token",description:`<strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. 
Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.`,name:"unk_token"},{anchor:"transformers.PreTrainedTokenizerBase.sep_token",description:`<strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.`,name:"sep_token"},{anchor:"transformers.PreTrainedTokenizerBase.pad_token",description:`<strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.`,name:"pad_token"},{anchor:"transformers.PreTrainedTokenizerBase.cls_token",description:`<strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.`,name:"cls_token"},{anchor:"transformers.PreTrainedTokenizerBase.mask_token",description:`<strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.`,name:"mask_token"},{anchor:"transformers.PreTrainedTokenizerBase.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. 
Add them here to ensure they won&#x2019;t be split by the tokenization process. Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.`,name:"additional_special_tokens"}]}}),rt=new z({props:{name:"__call__",anchor:"transformers.PreTrainedTokenizerBase.__call__",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2379",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.__call__.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded 
inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.__call__.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The 
length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),st=new z({props:{name:"as_target_tokenizer",anchor:"transformers.PreTrainedTokenizerBase.as_target_tokenizer",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3404"}}),at=new z({props:{name:"batch_decode",anchor:"transformers.PreTrainedTokenizerBase.batch_decode",parameters:[{name:"sequences",val:": typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3250",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.sequences",description:`<strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.`,name:"sequences"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.batch_decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The list of decoded sentences.</p> `,returnType:` <p><code>List[str]</code></p> `}}),it=new z({props:{name:"batch_encode_plus",anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus",parameters:[{name:"batch_text_or_text_pairs",val:": typing.Union[typing.List[str], typing.List[typing.Tuple[str, str]], typing.List[typing.List[str]], typing.List[typing.Tuple[typing.List[str], typing.List[str]]], typing.List[typing.List[int]], typing.List[typing.Tuple[typing.List[int], typing.List[int]]]]"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, 
transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2600",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.batch_text_or_text_pairs",description:`<strong>batch_text_or_text_pairs</strong> (<code>List[str]</code>, <code>List[Tuple[str, str]]</code>, <code>List[List[str]]</code>, <code>List[Tuple[List[str], List[str]]]</code>, and for not-fast tokenizers, also <code>List[List[int]]</code>, <code>List[Tuple[List[int], List[int]]]</code>) &#x2014; Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in <code>encode_plus</code>).`,name:"batch_text_or_text_pairs"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded 
inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.batch_encode_plus.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> 
\u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),Se=new Pr({props:{warning:"&lcub;true}",$$slots:{default:[th]},$$scope:{ctx:W}}}),dt=new z({props:{name:"build_inputs_with_special_tokens",anchor:"transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2884",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_0",description:"<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.",name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_1",description:"<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.",name:"token_ids_1"}],returnDescription:` <p>The model input with special tokens.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ct=new z({props:{name:"clean_up_tokenization",anchor:"transformers.PreTrainedTokenizerBase.clean_up_tokenization",parameters:[{name:"out_string",val:": str"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3359",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.clean_up_tokenization.out_string",description:"<strong>out_string</strong> (<code>str</code>) &#x2014; The text to clean up.",name:"out_string"}],returnDescription:` <p>The cleaned-up string.</p> `,returnType:` <p><code>str</code></p> `}}),lt=new 
z({props:{name:"convert_tokens_to_string",anchor:"transformers.PreTrainedTokenizerBase.convert_tokens_to_string",parameters:[{name:"tokens",val:": typing.List[str]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3237",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.convert_tokens_to_string.tokens",description:"<strong>tokens</strong> (<code>List[str]</code>) &#x2014; The token to join in a string.",name:"tokens"}],returnDescription:` <p>The joined tokens.</p> `,returnType:` <p><code>str</code></p> `}}),mt=new z({props:{name:"create_token_type_ids_from_sequences",anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2864",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_0",description:"<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.",name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_1",description:"<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.",name:"token_ids_1"}],returnDescription:` <p>The token type ids.</p> `,returnType:` <p><code>List[int]</code></p> `}}),ht=new z({props:{name:"decode",anchor:"transformers.PreTrainedTokenizerBase.decode",parameters:[{name:"token_ids",val:": typing.Union[int, typing.List[int], ForwardRef('np.ndarray'), ForwardRef('torch.Tensor'), ForwardRef('tf.Tensor')]"},{name:"skip_special_tokens",val:": bool = False"},{name:"clean_up_tokenization_spaces",val:": bool = 
True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3283",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.decode.token_ids",description:`<strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.`,name:"token_ids"},{anchor:"transformers.PreTrainedTokenizerBase.decode.skip_special_tokens",description:`<strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.`,name:"skip_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces",description:`<strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.`,name:"clean_up_tokenization_spaces"},{anchor:"transformers.PreTrainedTokenizerBase.decode.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.`,name:"kwargs"}],returnDescription:` <p>The decoded sentence.</p> `,returnType:` <p><code>str</code></p> `}}),ft=new z({props:{name:"encode",anchor:"transformers.PreTrainedTokenizerBase.encode",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[int]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[int], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = 
None"},{name:"stride",val:": int = 0"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2189",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.encode.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.encode.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.encode.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.encode.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.encode.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.encode.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.encode.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.encode.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.encode.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"}],returnDescription:` <p>The tokenized ids of the text.</p> `,returnType:` <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> `}}),gt=new z({props:{name:"encode_plus",anchor:"transformers.PreTrainedTokenizerBase.encode_plus",parameters:[{name:"text",val:": typing.Union[str, typing.List[str], typing.List[int]]"},{name:"text_pair",val:": typing.Union[str, typing.List[str], typing.List[int], NoneType] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"is_split_into_words",val:": bool = 
False"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2504",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.text",description:`<strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code> (the latter only for not-fast tokenizers)) &#x2014; The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.text_pair",description:`<strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).`,name:"text_pair"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.encode_plus.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. 
**kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> \u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),We=new 
Pr({props:{warning:"&lcub;true}",$$slots:{default:[oh]},$$scope:{ctx:W}}}),kt=new z({props:{name:"from_pretrained",anchor:"transformers.PreTrainedTokenizerBase.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike]"},{name:"*init_inputs",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L1551",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a predefined tokenizer hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing vocabulary files required by the tokenizer, for instance saved using the <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>(<strong>Deprecated</strong>, not applicable to all derived classes) A path or url to a single saved vocabulary file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g., <code>./my_model_directory/vocab.txt</code>.</li> </ul>`,name:"pretrained_model_name_or_path"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be 
used.`,name:"cache_dir"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force the (re-)download the vocabulary files and override the cached versions if they exist.`,name:"force_download"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.resume_download",description:`<strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received files. Attempt to resume the download if such a file exists.`,name:"resume_download"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.use_auth_token",description:`<strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. 
If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>).`,name:"use_auth_token"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to only rely on local files and not to attempt to download any files.`,name:"local_files_only"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.revision(str,",description:`<strong>revision(<code>str</code>,</strong> <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.`,name:"revision(str,"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for facebook/rag-token-base), specify it here.`,name:"subfolder"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.inputs",description:`<strong>inputs</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the Tokenizer <code>__init__</code> method.`,name:"inputs"},{anchor:"transformers.PreTrainedTokenizerBase.from_pretrained.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the Tokenizer <code>__init__</code> method. 
Can be used to set special tokens like <code>bos_token</code>, <code>eos_token</code>, <code>unk_token</code>, <code>sep_token</code>, <code>pad_token</code>, <code>cls_token</code>, <code>mask_token</code>, <code>additional_special_tokens</code>. See parameters in the <code>__init__</code> for more details.`,name:"kwargs"}]}}),je=new Pr({props:{$$slots:{default:[nh]},$$scope:{ctx:W}}}),vt=new _s({props:{code:`# We can't instantiate directly the base class *PreTrainedTokenizerBase* so let's show our examples on a derived class: BertTokenizer # Download vocabulary from huggingface.co and cache. tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") # Download vocabulary from huggingface.co (user-uploaded) and cache. tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased") # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*) tokenizer = BertTokenizer.from_pretrained("./test/saved_model/") # If the tokenizer uses a single vocabulary file, you can point directly to this file tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt") # You can link tokens to special vocabulary when instantiating tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", unk_token="<unk>") # You should be sure '<unk>' is in the vocabulary when doing that. 
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead) assert tokenizer.unk_token == "<unk>"`,highlighted:`<span class="hljs-comment"># We can&#x27;t instantiate directly the base class *PreTrainedTokenizerBase* so let&#x27;s show our examples on a derived class: BertTokenizer</span> <span class="hljs-comment"># Download vocabulary from huggingface.co and cache.</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-comment"># Download vocabulary from huggingface.co (user-uploaded) and cache.</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;dbmdz/bert-base-german-cased&quot;</span>) <span class="hljs-comment"># If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*)</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-comment"># If the tokenizer uses a single vocabulary file, you can point directly to this file</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/my_vocab.txt&quot;</span>) <span class="hljs-comment"># You can link tokens to special vocabulary when instantiating</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, unk_token=<span class="hljs-string">&quot;&lt;unk&gt;&quot;</span>) <span class="hljs-comment"># You should be sure &#x27;&lt;unk&gt;&#x27; is in the vocabulary when doing that.</span> <span class="hljs-comment"># Otherwise use tokenizer.add_special_tokens({&#x27;unk_token&#x27;: &#x27;&lt;unk&gt;&#x27;}) instead)</span> <span class="hljs-keyword">assert</span> tokenizer.unk_token == <span class="hljs-string">&quot;&lt;unk&gt;&quot;</span>`}}),Tt=new 
z({props:{name:"get_special_tokens_mask",anchor:"transformers.PreTrainedTokenizerBase.get_special_tokens_mask",parameters:[{name:"token_ids_0",val:": typing.List[int]"},{name:"token_ids_1",val:": typing.Optional[typing.List[int]] = None"},{name:"already_has_special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3328",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_0",description:`<strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids of the first sequence.`,name:"token_ids_0"},{anchor:"transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_1",description:`<strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; List of ids of the second sequence.`,name:"token_ids_1"},{anchor:"transformers.PreTrainedTokenizerBase.get_special_tokens_mask.already_has_special_tokens",description:`<strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.`,name:"already_has_special_tokens"}],returnDescription:` <p>1 for a special token, 0 for a sequence token.</p> `,returnType:` <p>A list of integers in the range [0, 1]</p> `}}),yt=new z({props:{name:"get_vocab",anchor:"transformers.PreTrainedTokenizerBase.get_vocab",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L1539",returnDescription:` <p>The vocabulary.</p> `,returnType:` <p><code>Dict[str, int]</code></p> `}}),wt=new z({props:{name:"pad",anchor:"transformers.PreTrainedTokenizerBase.pad",parameters:[{name:"encoded_inputs",val:": typing.Union[transformers.tokenization_utils_base.BatchEncoding, typing.List[transformers.tokenization_utils_base.BatchEncoding], typing.Dict[str, typing.List[int]], 
typing.Dict[str, typing.List[typing.List[int]]], typing.List[typing.Dict[str, typing.List[int]]]]"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = True"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"verbose",val:": bool = True"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2703",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.pad.encoded_inputs",description:`<strong>encoded_inputs</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, list of <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, <code>Dict[str, List[int]]</code>, <code>Dict[str, List[List[int]]</code> or <code>List[Dict[str, List[int]]]</code>) &#x2014; Tokenized inputs. 
Can represent one input (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a> or <code>Dict[str, List[int]]</code>) or a batch of tokenized inputs (list of <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, <em>Dict[str, List[List[int]]]</em> or <em>List[Dict[str, List[int]]]</em>) so you can use this method during preprocessing as well as in a PyTorch Dataloader collate function.</p> <p>Instead of <code>List[int]</code> you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type.`,name:"encoded_inputs"},{anchor:"transformers.PreTrainedTokenizerBase.pad.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.pad.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see 
above).`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.pad.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability</p> <blockquote> <p>= 7.5 (Volta).</p> </blockquote>`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.pad.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.pad.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.pad.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings.`,name:"verbose"}]}}),Re=new Pr({props:{$$slots:{default:[rh]},$$scope:{ctx:W}}}),zt=new z({props:{name:"prepare_for_model",anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model",parameters:[{name:"ids",val:": typing.List[int]"},{name:"pair_ids",val:": typing.Optional[typing.List[int]] = None"},{name:"add_special_tokens",val:": bool = True"},{name:"padding",val:": typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False"},{name:"truncation",val:": typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"stride",val:": int = 0"},{name:"pad_to_multiple_of",val:": typing.Optional[int] = None"},{name:"return_tensors",val:": typing.Union[str, transformers.file_utils.TensorType, NoneType] = None"},{name:"return_token_type_ids",val:": typing.Optional[bool] = None"},{name:"return_attention_mask",val:": typing.Optional[bool] = None"},{name:"return_overflowing_tokens",val:": bool = False"},{name:"return_special_tokens_mask",val:": bool = False"},{name:"return_offsets_mapping",val:": bool = False"},{name:"return_length",val:": bool = False"},{name:"verbose",val:": bool = True"},{name:"prepend_batch_axis",val:": bool = 
False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2904",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.ids",description:`<strong>ids</strong> (<code>List[int]</code>) &#x2014; Tokenized input ids of the first sequence. Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.`,name:"ids"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.pair_ids",description:`<strong>pair_ids</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Tokenized input ids of the second sequence. Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.`,name:"pair_ids"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens.`,name:"stride"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.is_split_into_words",description:`<strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.`,name:"is_split_into_words"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.pad_to_multiple_of",description:`<strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).`,name:"pad_to_multiple_of"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_token_type_ids",description:`<strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a>`,name:"return_token_type_ids"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_attention_mask",description:`<strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a>`,name:"return_attention_mask"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_overflowing_tokens",description:`<strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.`,name:"return_overflowing_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_special_tokens_mask",description:`<strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.`,name:"return_special_tokens_mask"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_offsets_mapping",description:`<strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.`,name:"return_offsets_mapping"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.return_length",description:`<strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded 
inputs.`,name:"return_length"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_for_model.verbose",description:`<strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method`,name:"verbose"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> \u2014 List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> \u2014 List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>\u201Ctoken_type_ids\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>\u201Cattention_mask\u201D</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> \u2014 List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> \u2014 Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> \u2014 List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> 
\u2014 The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),xt=new z({props:{name:"prepare_seq2seq_batch",anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch",parameters:[{name:"src_texts",val:": typing.List[str]"},{name:"tgt_texts",val:": typing.Optional[typing.List[str]] = None"},{name:"max_length",val:": typing.Optional[int] = None"},{name:"max_target_length",val:": typing.Optional[int] = None"},{name:"padding",val:": str = 'longest'"},{name:"return_tensors",val:": str = None"},{name:"truncation",val:": bool = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3438",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.src_texts",description:`<strong>src_texts</strong> (<code>List[str]</code>) &#x2014; List of documents to summarize or source language texts.`,name:"src_texts"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.tgt_texts",description:`<strong>tgt_texts</strong> (<code>list</code>, <em>optional</em>) &#x2014; List of summaries or target language texts.`,name:"tgt_texts"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_length",description:`<strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length for encoder inputs (documents to summarize or source language texts) If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.`,name:"max_length"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_target_length",description:`<strong>max_target_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length of decoder inputs (target language texts or summaries) If left unset or set to <code>None</code>, this will use the max_length value.`,name:"max_target_length"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.padding",description:`<strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul>`,name:"padding"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.return_tensors",description:`<strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul>`,name:"return_tensors"},{anchor:"transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.truncation",description:`<strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). **kwargs &#x2014; Additional keyword arguments passed along to <code>self.__call__</code>.</li> </ul>`,name:"truncation"}],returnDescription:` <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li><strong>input_ids</strong> \u2014 List of token ids to be fed to the encoder.</li> <li><strong>attention_mask</strong> \u2014 List of indices specifying which tokens should be attended to by the model.</li> <li><strong>labels</strong> \u2014 List of token ids for tgt_texts.</li> </ul> <p>The full set of keys <code>[input_ids, attention_mask, labels]</code>, will only be returned if tgt_texts is passed. 
Otherwise, input_ids, attention_mask will be the only keys.</p> `,returnType:` <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> `}}),$t=new z({props:{name:"push_to_hub",anchor:"transformers.file_utils.PushToHubMixin.push_to_hub",parameters:[{name:"repo_path_or_name",val:": typing.Optional[str] = None"},{name:"repo_url",val:": typing.Optional[str] = None"},{name:"use_temp_dir",val:": bool = False"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"organization",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"use_auth_token",val:": typing.Union[bool, str, NoneType] = None"},{name:"**model_card_kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842",parametersDescription:[{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name",description:`<strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your tokenizer in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.`,name:"repo_path_or_name"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.repo_url",description:`<strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. 
If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.`,name:"repo_url"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir",description:`<strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.`,name:"use_temp_dir"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. Will default to <code>&quot;add tokenizer&quot;</code>.`,name:"commit_message"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.organization",description:`<strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your tokenizer (you must be a member of this organization).`,name:"organization"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).`,name:"private"},{anchor:"transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token",description:`<strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). 
Will default to <code>True</code> if <code>repo_url</code> is not specified.`,name:"use_auth_token"}],returnDescription:` <p>The url of the commit of your tokenizer in the given repository.</p> `,returnType:` <p><code>str</code></p> `}}),qt=new _s({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") # Push the tokenizer to your namespace with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. tokenizer.push_to_hub("my-finetuned-bert") # Push the tokenizer to your namespace with the name "my-finetuned-bert" with no local clone. tokenizer.push_to_hub("my-finetuned-bert", use_temp_dir=True) # Push the tokenizer to an organization with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. tokenizer.push_to_hub("my-finetuned-bert", organization="huggingface") # Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*. tokenizer.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert")`,highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name 
&quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)`}}),Et=new z({props:{name:"register_for_auto_class",anchor:"transformers.PreTrainedTokenizerBase.register_for_auto_class",parameters:[{name:"auto_class",val:" = 'AutoTokenizer'"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3412",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.register_for_auto_class.auto_class",description:`<strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoTokenizer&quot;</code>) &#x2014; The auto class to register this new tokenizer with.`,name:"auto_class"}]}}),He=new Pr({props:{warning:"&lcub;true}",$$slots:{default:[sh]},$$scope:{ctx:W}}}),Lt=new z({props:{name:"save_pretrained",anchor:"transformers.PreTrainedTokenizerBase.save_pretrained",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"legacy_format",val:": typing.Optional[bool] = None"},{name:"filename_prefix",val:": typing.Optional[str] = None"},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L1990",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.save_pretrained.save_directory",description:"<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) 
&#x2014; The path to a directory where the tokenizer will be saved.",name:"save_directory"},{anchor:"transformers.PreTrainedTokenizerBase.save_pretrained.legacy_format",description:`<strong>legacy_format</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a separate added_tokens files.</p> <p>If <code>False</code>, will only save the tokenizer in the unified JSON format. This format is incompatible with &#x201C;slow&#x201D; tokenizers (not powered by the <em>tokenizers</em> library), so the tokenizer will not be able to be loaded in the corresponding &#x201C;slow&#x201D; tokenizer.</p> <p>If <code>True</code>, will save the tokenizer in legacy format. If the &#x201C;slow&#x201D; tokenizer doesn&#x2019;t exits, a value error is raised. filename_prefix &#x2014; (<code>str</code>, <em>optional</em>): A prefix to add to the names of the files saved by the tokenizer.`,name:"legacy_format"},{anchor:"transformers.PreTrainedTokenizerBase.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. 
Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div>`,name:"push_to_hub"}],returnDescription:` <p>The files saved.</p> `,returnType:` <p>A tuple of <code>str</code></p> `}}),Nt=new z({props:{name:"save_vocabulary",anchor:"transformers.PreTrainedTokenizerBase.save_vocabulary",parameters:[{name:"save_directory",val:": str"},{name:"filename_prefix",val:": typing.Optional[str] = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2151",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.save_vocabulary.save_directory",description:`<strong>save_directory</strong> (<code>str</code>) &#x2014; The directory in which to save the vocabulary.`,name:"save_directory"},{anchor:"transformers.PreTrainedTokenizerBase.save_vocabulary.filename_prefix",description:`<strong>filename_prefix</strong> (<code>str</code>, <em>optional</em>) &#x2014; An optional prefix to add to the named of the saved files.`,name:"filename_prefix"}],returnDescription:` <p>Paths to the files saved.</p> `,returnType:` <p><code>Tuple(str)</code></p> `}}),Ft=new z({props:{name:"tokenize",anchor:"transformers.PreTrainedTokenizerBase.tokenize",parameters:[{name:"text",val:": str"},{name:"pair",val:": typing.Optional[str] = None"},{name:"add_special_tokens",val:": bool = False"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2169",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.tokenize.text",description:`<strong>text</strong> (<code>str</code>) &#x2014; The sequence to be encoded.`,name:"text"},{anchor:"transformers.PreTrainedTokenizerBase.tokenize.pair",description:`<strong>pair</strong> (<code>str</code>, <em>optional</em>) &#x2014; A second sequence to be encoded with the 
first.`,name:"pair"},{anchor:"transformers.PreTrainedTokenizerBase.tokenize.add_special_tokens",description:`<strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add the special tokens associated with the corresponding model.`,name:"add_special_tokens"},{anchor:"transformers.PreTrainedTokenizerBase.tokenize.kwargs",description:`<strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific encode method. See details in <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"><strong>call</strong>()</a>`,name:"kwargs"}],returnDescription:` <p>The list of tokens.</p> `,returnType:` <p><code>List[str]</code></p> `}}),Ct=new z({props:{name:"truncate_sequences",anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences",parameters:[{name:"ids",val:": typing.List[int]"},{name:"pair_ids",val:": typing.Optional[typing.List[int]] = None"},{name:"num_tokens_to_remove",val:": int = 0"},{name:"truncation_strategy",val:": typing.Union[str, transformers.tokenization_utils_base.TruncationStrategy] = 'longest_first'"},{name:"stride",val:": int = 0"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3040",parametersDescription:[{anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences.ids",description:`<strong>ids</strong> (<code>List[int]</code>) &#x2014; Tokenized input ids of the first sequence. Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.`,name:"ids"},{anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences.pair_ids",description:`<strong>pair_ids</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Tokenized input ids of the second sequence. 
Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.`,name:"pair_ids"},{anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences.num_tokens_to_remove",description:`<strong>num_tokens_to_remove</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of tokens to remove using the truncation strategy.`,name:"num_tokens_to_remove"},{anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences.truncation_strategy",description:`<strong>truncation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; The strategy to follow for truncation. Can be:</p> <ul> <li><code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul>`,name:"truncation_strategy"},{anchor:"transformers.PreTrainedTokenizerBase.truncate_sequences.stride",description:`<strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens.`,name:"stride"}],returnDescription:` <p>The truncated <code>ids</code>, the truncated <code>pair_ids</code> and the list of overflowing tokens. Note: The <em>longest_first</em> strategy returns empty list of overflowing tokens if a pair of sequences (or a batch of pairs) is provided.</p> `,returnType:` <p><code>Tuple[List[int], List[int], List[int]]</code></p> `}}),Ot=new gs({}),Wt=new z({props:{name:"class transformers.SpecialTokensMixin",anchor:"transformers.SpecialTokensMixin",parameters:[{name:"verbose",val:" = True"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L753",parametersDescription:[{anchor:"transformers.SpecialTokensMixin.bos_token",description:`<strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence.`,name:"bos_token"},{anchor:"transformers.SpecialTokensMixin.eos_token",description:`<strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence.`,name:"eos_token"},{anchor:"transformers.SpecialTokensMixin.unk_token",description:`<strong>unk_token</strong> (<code>str</code> or 
<code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token.`,name:"unk_token"},{anchor:"transformers.SpecialTokensMixin.sep_token",description:`<strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance).`,name:"sep_token"},{anchor:"transformers.SpecialTokensMixin.pad_token",description:`<strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation.`,name:"pad_token"},{anchor:"transformers.SpecialTokensMixin.cls_token",description:`<strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance).`,name:"cls_token"},{anchor:"transformers.SpecialTokensMixin.mask_token",description:`<strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT).`,name:"mask_token"},{anchor:"transformers.SpecialTokensMixin.additional_special_tokens",description:`<strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens.`,name:"additional_special_tokens"}]}}),jt=new z({props:{name:"add_special_tokens",anchor:"transformers.SpecialTokensMixin.add_special_tokens",parameters:[{name:"special_tokens_dict",val:": typing.Dict[str, typing.Union[str, 
tokenizers.AddedToken]]"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L833",parametersDescription:[{anchor:"transformers.SpecialTokensMixin.add_special_tokens.special_tokens_dict",description:`<strong>special_tokens_dict</strong> (dictionary <em>str</em> to <em>str</em> or <code>tokenizers.AddedToken</code>) &#x2014; Keys should be in the list of predefined special attributes: [<code>bos_token</code>, <code>eos_token</code>, <code>unk_token</code>, <code>sep_token</code>, <code>pad_token</code>, <code>cls_token</code>, <code>mask_token</code>, <code>additional_special_tokens</code>].</p> <p>Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the <code>unk_token</code> to them).`,name:"special_tokens_dict"}],returnDescription:` <p>Number of tokens added to the vocabulary.</p> `,returnType:` <p><code>int</code></p> `}}),Ht=new _s({props:{code:`# Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained("gpt2") model = GPT2Model.from_pretrained("gpt2") special_tokens_dict = {"cls_token": "<CLS>"} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. 
model.resize_token_embeddings(len(tokenizer)) assert tokenizer.cls_token == "<CLS>"`,highlighted:`<span class="hljs-comment"># Let&#x27;s see how to add a new classification token to GPT-2</span> tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) model = GPT2Model.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) special_tokens_dict = {<span class="hljs-string">&quot;cls_token&quot;</span>: <span class="hljs-string">&quot;&lt;CLS&gt;&quot;</span>} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;We have added&quot;</span>, num_added_toks, <span class="hljs-string">&quot;tokens&quot;</span>) <span class="hljs-comment"># Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.</span> model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer)) <span class="hljs-keyword">assert</span> tokenizer.cls_token == <span class="hljs-string">&quot;&lt;CLS&gt;&quot;</span>`}}),Gt=new z({props:{name:"add_tokens",anchor:"transformers.SpecialTokensMixin.add_tokens",parameters:[{name:"new_tokens",val:": typing.Union[str, tokenizers.AddedToken, typing.List[typing.Union[str, tokenizers.AddedToken]]]"},{name:"special_tokens",val:": bool = False"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L905",parametersDescription:[{anchor:"transformers.SpecialTokensMixin.add_tokens.new_tokens",description:`<strong>new_tokens</strong> (<code>str</code>, <code>tokenizers.AddedToken</code> or a list of <em>str</em> or <code>tokenizers.AddedToken</code>) &#x2014; Tokens are only added if they are not already in the vocabulary. 
<code>tokenizers.AddedToken</code> wraps a string token to let you personalize its behavior: whether this token should only match against a single word, whether this token should strip all potential whitespaces on the left side, whether this token should strip all potential whitespaces on the right side, etc.`,name:"new_tokens"},{anchor:"transformers.SpecialTokensMixin.add_tokens.special_tokens",description:`<strong>special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Can be used to specify if the token is a special token. This mostly change the normalization behavior (special tokens like CLS or [MASK] are usually not lower-cased for instance).</p> <p>See details for <code>tokenizers.AddedToken</code> in HuggingFace tokenizers library.`,name:"special_tokens"}],returnDescription:` <p>Number of tokens added to the vocabulary.</p> `,returnType:` <p><code>int</code></p> `}}),Yt=new _s({props:{code:`# Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") model = BertModel.from_pretrained("bert-base-uncased") num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"]) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. 
model.resize_token_embeddings(len(tokenizer))`,highlighted:`<span class="hljs-comment"># Let&#x27;s see how to increase the vocabulary of Bert model and tokenizer</span> tokenizer = BertTokenizerFast.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) num_added_toks = tokenizer.add_tokens([<span class="hljs-string">&quot;new_tok1&quot;</span>, <span class="hljs-string">&quot;my_new-tok2&quot;</span>]) <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;We have added&quot;</span>, num_added_toks, <span class="hljs-string">&quot;tokens&quot;</span>) <span class="hljs-comment"># Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.</span> model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer))`}}),Jt=new z({props:{name:"sanitize_special_tokens",anchor:"transformers.SpecialTokensMixin.sanitize_special_tokens",parameters:[],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L821",returnDescription:` <p>The number of tokens added in the vocabulary during the operation.</p> `,returnType:` <p><code>int</code></p> `}}),Kt=new gs({}),Qt=new z({props:{name:"class transformers.tokenization_utils_base.TruncationStrategy",anchor:"transformers.tokenization_utils_base.TruncationStrategy",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L129"}}),Zt=new z({props:{name:"class transformers.CharSpan",anchor:"transformers.CharSpan",parameters:[{name:"start",val:": int"},{name:"end",val:": 
int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L141",parametersDescription:[{anchor:"transformers.CharSpan.start",description:"<strong>start</strong> (<code>int</code>) &#x2014; Index of the first character in the original string.",name:"start"},{anchor:"transformers.CharSpan.end",description:"<strong>end</strong> (<code>int</code>) &#x2014; Index of the character following the last character in the original string.",name:"end"}]}}),eo=new z({props:{name:"class transformers.TokenSpan",anchor:"transformers.TokenSpan",parameters:[{name:"start",val:": int"},{name:"end",val:": int"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L154",parametersDescription:[{anchor:"transformers.TokenSpan.start",description:"<strong>start</strong> (<code>int</code>) &#x2014; Index of the first token in the span.",name:"start"},{anchor:"transformers.TokenSpan.end",description:"<strong>end</strong> (<code>int</code>) &#x2014; Index of the token following the last token in the span.",name:"end"}]}}),{c(){h=r("meta"),P=d(),v=r("h1"),y=r("a"),E=r("span"),u(w.$$.fragment),x=d(),B=r("span"),ge=o("Utilities for Tokenizers"),j=d(),D=r("p"),ro=o(`This page lists all the utility functions used by the tokenizers, mainly the class `),so=r("a"),ks=o("PreTrainedTokenizerBase"),bs=o(` that implements the common methods between `),ao=r("a"),vs=o("PreTrainedTokenizer"),Ts=o(" and "),io=r("a"),ys=o("PreTrainedTokenizerFast"),ws=o(` and the mixin `),co=r("a"),zs=o("SpecialTokensMixin"),xs=o("."),qr=d(),lo=r("p"),$s=o("Most of those are only useful if you are studying the code of the tokenizers in the library."),Er=d(),ke=r("h2"),De=r("a"),So=r("span"),u(ot.$$.fragment),Ps=d(),Co=r("span"),qs=o("PreTrainedTokenizerBase"),Br=d(),p=r("div"),u(nt.$$.fragment),Es=d(),be=r("p"),Bs=o("Base class for "),po=r("a"),Ls=o("PreTrainedTokenizer"),Ds=o(" and 
"),mo=r("a"),Is=o("PreTrainedTokenizerFast"),Ns=o("."),As=d(),Oo=r("p"),Fs=o("Handles shared (mostly boiler plate) methods for those two classes."),Ss=d(),Wo=r("p"),Cs=o("Class attributes (overridden by derived classes)"),Os=d(),I=r("ul"),te=r("li"),jo=r("strong"),Ws=o("vocab_files_names"),js=o(" ("),Mo=r("code"),Ms=o("Dict[str, str]"),Rs=o(") \u2014 A dictionary with, as keys, the "),Ro=r("code"),Us=o("__init__"),Vs=o(` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),Hs=d(),F=r("li"),Uo=r("strong"),Gs=o("pretrained_vocab_files_map"),Xs=o(" ("),Vo=r("code"),Ys=o("Dict[str, Dict[str, str]]"),Js=o(`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),Ho=r("code"),Ks=o("__init__"),Qs=o(` keyword name of each vocabulary file required by the model, the low-level being the `),Go=r("code"),Zs=o("short-cut-names"),ea=o(" of the pretrained models with, as associated values, the "),Xo=r("code"),ta=o("url"),oa=o(` to the associated pretrained vocabulary file.`),na=d(),V=r("li"),Yo=r("strong"),ra=o("max_model_input_sizes"),sa=o(" ("),Jo=r("code"),aa=o("Dict[str, Optional[int]]"),ia=o(") \u2014 A dictionary with, as keys, the "),Ko=r("code"),da=o("short-cut-names"),ca=o(` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or `),Qo=r("code"),la=o("None"),pa=o(" if the model has no maximum input size."),ma=d(),S=r("li"),Zo=r("strong"),ha=o("pretrained_init_configuration"),ua=o(" ("),en=r("code"),fa=o("Dict[str, Dict[str, Any]]"),_a=o(`) \u2014 A dictionary with, as keys, the `),tn=r("code"),ga=o("short-cut-names"),ka=o(` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the `),on=r("code"),ba=o("__init__"),va=o(` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),ho=r("a"),Ta=o("from_pretrained()"),ya=o(" 
method."),wa=d(),Ie=r("li"),nn=r("strong"),za=o("model_input_names"),xa=o(" ("),rn=r("code"),$a=o("List[str]"),Pa=o(") \u2014 A list of inputs expected in the forward pass of the model."),qa=d(),H=r("li"),sn=r("strong"),Ea=o("padding_side"),Ba=o(" ("),an=r("code"),La=o("str"),Da=o(`) \u2014 The default value for the side on which the model should have padding applied. Should be `),dn=r("code"),Ia=o("'right'"),Na=o(" or "),cn=r("code"),Aa=o("'left'"),Fa=o("."),Sa=d(),G=r("li"),ln=r("strong"),Ca=o("truncation_side"),Oa=o(" ("),pn=r("code"),Wa=o("str"),ja=o(`) \u2014 The default value for the side on which the model should have truncation applied. Should be `),mn=r("code"),Ma=o("'right'"),Ra=o(" or "),hn=r("code"),Ua=o("'left'"),Va=o("."),Ha=d(),Ne=r("div"),u(rt.$$.fragment),Ga=d(),un=r("p"),Xa=o(`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),Ya=d(),Ae=r("div"),u(st.$$.fragment),Ja=d(),fn=r("p"),Ka=o(`Temporarily sets the tokenizer for encoding the targets. 
Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),Qa=d(),Fe=r("div"),u(at.$$.fragment),Za=d(),_n=r("p"),ei=o("Convert a list of lists of token ids into a list of strings by calling decode."),ti=d(),oe=r("div"),u(it.$$.fragment),oi=d(),gn=r("p"),ni=o("Tokenize and prepare for the model a list of sequences or a list of pairs of sequences."),ri=d(),u(Se.$$.fragment),si=d(),ne=r("div"),u(dt.$$.fragment),ai=d(),kn=r("p"),ii=o(`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens.`),di=d(),bn=r("p"),ci=o("This implementation does not add special tokens and this method should be overridden in a subclass."),li=d(),Ce=r("div"),u(ct.$$.fragment),pi=d(),vn=r("p"),mi=o("Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms."),hi=d(),Oe=r("div"),u(lt.$$.fragment),ui=d(),pt=r("p"),fi=o("Converts a sequence of tokens in a single string. The most simple way to do it is "),Tn=r("code"),_i=o('" ".join(tokens)'),gi=o(` but we often want to remove sub-word tokenization artifacts at the same time.`),ki=d(),re=r("div"),u(mt.$$.fragment),bi=d(),uo=r("p"),vi=o("Create the token type IDs corresponding to the sequences passed. 
"),fo=r("a"),Ti=o(`What are token type IDs?`),yi=d(),yn=r("p"),wi=o("Should be overridden in a subclass if the model has a special way of building those."),zi=d(),se=r("div"),u(ht.$$.fragment),xi=d(),wn=r("p"),$i=o(`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Pi=d(),ut=r("p"),qi=o("Similar to doing "),zn=r("code"),Ei=o("self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Bi=o("."),Li=d(),ae=r("div"),u(ft.$$.fragment),Di=d(),xn=r("p"),Ii=o("Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),Ni=d(),_t=r("p"),Ai=o("Same as doing "),$n=r("code"),Fi=o("self.convert_tokens_to_ids(self.tokenize(text))"),Si=o("."),Ci=d(),ie=r("div"),u(gt.$$.fragment),Oi=d(),Pn=r("p"),Wi=o("Tokenize and prepare for the model a sequence or a pair of sequences."),ji=d(),u(We.$$.fragment),Mi=d(),C=r("div"),u(kt.$$.fragment),Ri=d(),bt=r("p"),Ui=o("Instantiate a "),_o=r("a"),Vi=o("PreTrainedTokenizerBase"),Hi=o(` (or a derived class) from a predefined tokenizer.`),Gi=d(),u(je.$$.fragment),Xi=d(),qn=r("p"),Yi=o("Examples:"),Ji=d(),u(vt.$$.fragment),Ki=d(),Me=r("div"),u(Tt.$$.fragment),Qi=d(),ve=r("p"),Zi=o(`Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `),En=r("code"),ed=o("prepare_for_model"),td=o(" or "),Bn=r("code"),od=o("encode_plus"),nd=o(" methods."),rd=d(),de=r("div"),u(yt.$$.fragment),sd=d(),Ln=r("p"),ad=o("Returns the vocabulary as a dictionary of token to index."),id=d(),ce=r("p"),Dn=r("code"),dd=o("tokenizer.get_vocab()[token]"),cd=o(" is equivalent to "),In=r("code"),ld=o("tokenizer.convert_tokens_to_ids(token)"),pd=o(" when "),Nn=r("code"),md=o("token"),hd=o(` is in the vocab.`),ud=d(),X=r("div"),u(wt.$$.fragment),fd=d(),An=r("p"),_d=o(`Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length in the batch.`),gd=d(),Q=r("p"),kd=o("Padding side (left/right) padding token ids are defined at the tokenizer level (with "),Fn=r("code"),bd=o("self.padding_side"),vd=o(`, `),Sn=r("code"),Td=o("self.pad_token_id"),yd=o(" and "),Cn=r("code"),wd=o("self.pad_token_type_id"),zd=o(")"),xd=d(),u(Re.$$.fragment),$d=d(),Ue=r("div"),u(zt.$$.fragment),Pd=d(),M=r("p"),qd=o(`Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for `),On=r("em"),Ed=o("pair_ids"),Bd=o(` different than `),Wn=r("code"),Ld=o("None"),Dd=o(" and "),jn=r("em"),Id=o("truncation_strategy = longest_first"),Nd=o(" or "),Mn=r("code"),Ad=o("True"),Fd=o(`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error.`),Sd=d(),Ve=r("div"),u(xt.$$.fragment),Cd=d(),Rn=r("p"),Od=o("Prepare model inputs for translation. 
For best performance, translate one sentence at a time."),Wd=d(),Y=r("div"),u($t.$$.fragment),jd=d(),Pt=r("p"),Md=o(`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Un=r("code"),Rd=o("repo_path_or_name"),Ud=o("."),Vd=d(),Vn=r("p"),Hd=o("Examples:"),Gd=d(),u(qt.$$.fragment),Xd=d(),le=r("div"),u(Et.$$.fragment),Yd=d(),Bt=r("p"),Jd=o(`Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the library are already mapped with `),Hn=r("code"),Kd=o("AutoTokenizer"),Qd=o("."),Zd=d(),u(He.$$.fragment),ec=d(),J=r("div"),u(Lt.$$.fragment),tc=d(),Gn=r("p"),oc=o("Save the full tokenizer state."),nc=d(),Dt=r("p"),rc=o(`This method make sure the full tokenizer can then be re-loaded using the `),Xn=r("code"),sc=o("from_pretrained"),ac=o(" class method.."),ic=d(),It=r("p"),dc=o(`Warning,None This won\u2019t save modifications you may have applied to the tokenizer after the instantiation (for instance, modifying `),Yn=r("code"),cc=o("tokenizer.do_lower_case"),lc=o(" after creation)."),pc=d(),pe=r("div"),u(Nt.$$.fragment),mc=d(),Jn=r("p"),hc=o("Save only the vocabulary of the tokenizer (vocabulary + added tokens)."),uc=d(),At=r("p"),fc=o(`This method won\u2019t save the configuration and special token mappings of the tokenizer. 
Use `),Kn=r("code"),_c=o("_save_pretrained()"),gc=o("to save the whole state of the tokenizer."),kc=d(),Ge=r("div"),u(Ft.$$.fragment),bc=d(),St=r("p"),vc=o("Converts a string in a sequence of tokens, replacing unknown tokens with the "),Qn=r("code"),Tc=o("unk_token"),yc=o("."),wc=d(),Xe=r("div"),u(Ct.$$.fragment),zc=d(),Zn=r("p"),xc=o("Truncates a sequence pair in-place following the strategy."),Lr=d(),Te=r("h2"),Ye=r("a"),er=r("span"),u(Ot.$$.fragment),$c=d(),tr=r("span"),Pc=o("SpecialTokensMixin"),Dr=d(),A=r("div"),u(Wt.$$.fragment),qc=d(),ye=r("p"),Ec=o("A mixin derived by "),go=r("a"),Bc=o("PreTrainedTokenizer"),Lc=o(" and "),ko=r("a"),Dc=o("PreTrainedTokenizerFast"),Ic=o(` to handle specific behaviors related to special tokens. In particular, this class hold the attributes which can be used to directly access these special tokens in a model-independent manner and allow to set and update the special tokens.`),Nc=d(),q=r("div"),u(jt.$$.fragment),Ac=d(),or=r("p"),Fc=o(`Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary).`),Sc=d(),nr=r("p"),Cc=o(`Note,None When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.`),Oc=d(),Mt=r("p"),Wc=o("In order to do that, please use the "),bo=r("a"),jc=o("resize_token_embeddings()"),Mc=o(" method."),Rc=d(),Rt=r("p"),Uc=o("Using "),rr=r("code"),Vc=o("add_special_tokens"),Hc=o(" will ensure your special tokens can be used in several ways:"),Gc=d(),Ut=r("ul"),sr=r("li"),Xc=o("Special tokens are carefully handled by the tokenizer (they are never split)."),Yc=d(),Vt=r("li"),Jc=o("You can easily refer to special tokens using tokenizer class attributes like "),ar=r("code"),Kc=o("tokenizer.cls_token"),Qc=o(`. 
This makes it easy to develop model-agnostic training and fine-tuning scripts.`),Zc=d(),R=r("p"),el=o(`When possible, special tokens are already registered for provided pretrained models (for instance `),vo=r("a"),tl=o("BertTokenizer"),ol=d(),ir=r("code"),nl=o("cls_token"),rl=o(" is already registered to be :obj"),dr=r("em"),sl=o("\u2019[CLS]\u2019"),al=o(` and XLM\u2019s one is also registered to be `),cr=r("code"),il=o("'</s>'"),dl=o(")."),cl=d(),lr=r("p"),ll=o("Examples:"),pl=d(),u(Ht.$$.fragment),ml=d(),N=r("div"),u(Gt.$$.fragment),hl=d(),pr=r("p"),ul=o(`Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary.`),fl=d(),mr=r("p"),_l=o(`Note,None When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.`),gl=d(),Xt=r("p"),kl=o("In order to do that, please use the "),To=r("a"),bl=o("resize_token_embeddings()"),vl=o(" method."),Tl=d(),hr=r("p"),yl=o("Examples:"),wl=d(),u(Yt.$$.fragment),zl=d(),me=r("div"),u(Jt.$$.fragment),xl=d(),we=r("p"),$l=o("Make sure that all the special tokens attributes of the tokenizer ("),ur=r("code"),Pl=o("tokenizer.mask_token"),ql=o(`, `),fr=r("code"),El=o("tokenizer.cls_token"),Bl=o(", etc.) are in the vocabulary."),Ll=d(),_r=r("p"),Dl=o("Add the missing ones to the vocabulary if needed."),Ir=d(),ze=r("h2"),Je=r("a"),gr=r("span"),u(Kt.$$.fragment),Il=d(),kr=r("span"),Nl=o("Enums and namedtuples"),Nr=d(),xe=r("div"),u(Qt.$$.fragment),Al=d(),$e=r("p"),Fl=o("Possible values for the "),br=r("code"),Sl=o("truncation"),Cl=o(" argument in "),Ke=r("a"),Ol=o("PreTrainedTokenizerBase."),vr=r("strong"),Wl=o("call"),jl=o("()"),Ml=o(`. 
Useful for tab-completion in an IDE.`),Ar=d(),Pe=r("div"),u(Zt.$$.fragment),Rl=d(),Tr=r("p"),Ul=o("Character span in the original string."),Fr=d(),qe=r("div"),u(eo.$$.fragment),Vl=d(),yr=r("p"),Hl=o("Token span in an encoded string (list of tokens)."),this.h()},l(i){const T=eh('[data-svelte="svelte-1phssyn"]',document.head);h=s(T,"META",{name:!0,content:!0}),T.forEach(t),P=c(i),v=s(i,"H1",{class:!0});var to=a(v);y=s(to,"A",{id:!0,class:!0,href:!0});var wr=a(y);E=s(wr,"SPAN",{});var zr=a(E);f(w.$$.fragment,zr),zr.forEach(t),wr.forEach(t),x=c(to),B=s(to,"SPAN",{});var xr=a(B);ge=n(xr,"Utilities for Tokenizers"),xr.forEach(t),to.forEach(t),j=c(i),D=s(i,"P",{});var U=a(D);ro=n(U,`This page lists all the utility functions used by the tokenizers, mainly the class `),so=s(U,"A",{href:!0});var Xl=a(so);ks=n(Xl,"PreTrainedTokenizerBase"),Xl.forEach(t),bs=n(U,` that implements the common methods between `),ao=s(U,"A",{href:!0});var Yl=a(ao);vs=n(Yl,"PreTrainedTokenizer"),Yl.forEach(t),Ts=n(U," and "),io=s(U,"A",{href:!0});var Jl=a(io);ys=n(Jl,"PreTrainedTokenizerFast"),Jl.forEach(t),ws=n(U,` and the mixin `),co=s(U,"A",{href:!0});var Kl=a(co);zs=n(Kl,"SpecialTokensMixin"),Kl.forEach(t),xs=n(U,"."),U.forEach(t),qr=c(i),lo=s(i,"P",{});var Ql=a(lo);$s=n(Ql,"Most of those are only useful if you are studying the code of the tokenizers in the library."),Ql.forEach(t),Er=c(i),ke=s(i,"H2",{class:!0});var Cr=a(ke);De=s(Cr,"A",{id:!0,class:!0,href:!0});var Zl=a(De);So=s(Zl,"SPAN",{});var ep=a(So);f(ot.$$.fragment,ep),ep.forEach(t),Zl.forEach(t),Ps=c(Cr),Co=s(Cr,"SPAN",{});var tp=a(Co);qs=n(tp,"PreTrainedTokenizerBase"),tp.forEach(t),Cr.forEach(t),Br=c(i),p=s(i,"DIV",{class:!0});var m=a(p);f(nt.$$.fragment,m),Es=c(m),be=s(m,"P",{});var yo=a(be);Bs=n(yo,"Base class for "),po=s(yo,"A",{href:!0});var op=a(po);Ls=n(op,"PreTrainedTokenizer"),op.forEach(t),Ds=n(yo," and "),mo=s(yo,"A",{href:!0});var 
np=a(mo);Is=n(np,"PreTrainedTokenizerFast"),np.forEach(t),Ns=n(yo,"."),yo.forEach(t),As=c(m),Oo=s(m,"P",{});var rp=a(Oo);Fs=n(rp,"Handles shared (mostly boiler plate) methods for those two classes."),rp.forEach(t),Ss=c(m),Wo=s(m,"P",{});var sp=a(Wo);Cs=n(sp,"Class attributes (overridden by derived classes)"),sp.forEach(t),Os=c(m),I=s(m,"UL",{});var O=a(I);te=s(O,"LI",{});var oo=a(te);jo=s(oo,"STRONG",{});var ap=a(jo);Ws=n(ap,"vocab_files_names"),ap.forEach(t),js=n(oo," ("),Mo=s(oo,"CODE",{});var ip=a(Mo);Ms=n(ip,"Dict[str, str]"),ip.forEach(t),Rs=n(oo,") \u2014 A dictionary with, as keys, the "),Ro=s(oo,"CODE",{});var dp=a(Ro);Us=n(dp,"__init__"),dp.forEach(t),Vs=n(oo,` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).`),oo.forEach(t),Hs=c(O),F=s(O,"LI",{});var Z=a(F);Uo=s(Z,"STRONG",{});var cp=a(Uo);Gs=n(cp,"pretrained_vocab_files_map"),cp.forEach(t),Xs=n(Z," ("),Vo=s(Z,"CODE",{});var lp=a(Vo);Ys=n(lp,"Dict[str, Dict[str, str]]"),lp.forEach(t),Js=n(Z,`) \u2014 A dictionary of dictionaries, with the high-level keys being the `),Ho=s(Z,"CODE",{});var pp=a(Ho);Ks=n(pp,"__init__"),pp.forEach(t),Qs=n(Z,` keyword name of each vocabulary file required by the model, the low-level being the `),Go=s(Z,"CODE",{});var mp=a(Go);Zs=n(mp,"short-cut-names"),mp.forEach(t),ea=n(Z," of the pretrained models with, as associated values, the "),Xo=s(Z,"CODE",{});var hp=a(Xo);ta=n(hp,"url"),hp.forEach(t),oa=n(Z,` to the associated pretrained vocabulary file.`),Z.forEach(t),na=c(O),V=s(O,"LI",{});var Ee=a(V);Yo=s(Ee,"STRONG",{});var up=a(Yo);ra=n(up,"max_model_input_sizes"),up.forEach(t),sa=n(Ee," ("),Jo=s(Ee,"CODE",{});var fp=a(Jo);aa=n(fp,"Dict[str, Optional[int]]"),fp.forEach(t),ia=n(Ee,") \u2014 A dictionary with, as keys, the "),Ko=s(Ee,"CODE",{});var _p=a(Ko);da=n(_p,"short-cut-names"),_p.forEach(t),ca=n(Ee,` of the pretrained models, and as associated values, the maximum length of the 
sequence inputs of this model, or `),Qo=s(Ee,"CODE",{});var gp=a(Qo);la=n(gp,"None"),gp.forEach(t),pa=n(Ee," if the model has no maximum input size."),Ee.forEach(t),ma=c(O),S=s(O,"LI",{});var ee=a(S);Zo=s(ee,"STRONG",{});var kp=a(Zo);ha=n(kp,"pretrained_init_configuration"),kp.forEach(t),ua=n(ee," ("),en=s(ee,"CODE",{});var bp=a(en);fa=n(bp,"Dict[str, Dict[str, Any]]"),bp.forEach(t),_a=n(ee,`) \u2014 A dictionary with, as keys, the `),tn=s(ee,"CODE",{});var vp=a(tn);ga=n(vp,"short-cut-names"),vp.forEach(t),ka=n(ee,` of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the `),on=s(ee,"CODE",{});var Tp=a(on);ba=n(Tp,"__init__"),Tp.forEach(t),va=n(ee,` method of the tokenizer class for this pretrained model when loading the tokenizer with the `),ho=s(ee,"A",{href:!0});var yp=a(ho);Ta=n(yp,"from_pretrained()"),yp.forEach(t),ya=n(ee," method."),ee.forEach(t),wa=c(O),Ie=s(O,"LI",{});var $r=a(Ie);nn=s($r,"STRONG",{});var wp=a(nn);za=n(wp,"model_input_names"),wp.forEach(t),xa=n($r," ("),rn=s($r,"CODE",{});var zp=a(rn);$a=n(zp,"List[str]"),zp.forEach(t),Pa=n($r,") \u2014 A list of inputs expected in the forward pass of the model."),$r.forEach(t),qa=c(O),H=s(O,"LI",{});var Be=a(H);sn=s(Be,"STRONG",{});var xp=a(sn);Ea=n(xp,"padding_side"),xp.forEach(t),Ba=n(Be," ("),an=s(Be,"CODE",{});var $p=a(an);La=n($p,"str"),$p.forEach(t),Da=n(Be,`) \u2014 The default value for the side on which the model should have padding applied. Should be `),dn=s(Be,"CODE",{});var Pp=a(dn);Ia=n(Pp,"'right'"),Pp.forEach(t),Na=n(Be," or "),cn=s(Be,"CODE",{});var qp=a(cn);Aa=n(qp,"'left'"),qp.forEach(t),Fa=n(Be,"."),Be.forEach(t),Sa=c(O),G=s(O,"LI",{});var Le=a(G);ln=s(Le,"STRONG",{});var Ep=a(ln);Ca=n(Ep,"truncation_side"),Ep.forEach(t),Oa=n(Le," ("),pn=s(Le,"CODE",{});var Bp=a(pn);Wa=n(Bp,"str"),Bp.forEach(t),ja=n(Le,`) \u2014 The default value for the side on which the model should have truncation applied. 
Should be `),mn=s(Le,"CODE",{});var Lp=a(mn);Ma=n(Lp,"'right'"),Lp.forEach(t),Ra=n(Le," or "),hn=s(Le,"CODE",{});var Dp=a(hn);Ua=n(Dp,"'left'"),Dp.forEach(t),Va=n(Le,"."),Le.forEach(t),O.forEach(t),Ha=c(m),Ne=s(m,"DIV",{class:!0});var Or=a(Ne);f(rt.$$.fragment,Or),Ga=c(Or),un=s(Or,"P",{});var Ip=a(un);Xa=n(Ip,`Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.`),Ip.forEach(t),Or.forEach(t),Ya=c(m),Ae=s(m,"DIV",{class:!0});var Wr=a(Ae);f(st.$$.fragment,Wr),Ja=c(Wr),fn=s(Wr,"P",{});var Np=a(fn);Ka=n(Np,`Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.`),Np.forEach(t),Wr.forEach(t),Qa=c(m),Fe=s(m,"DIV",{class:!0});var jr=a(Fe);f(at.$$.fragment,jr),Za=c(jr),_n=s(jr,"P",{});var Ap=a(_n);ei=n(Ap,"Convert a list of lists of token ids into a list of strings by calling decode."),Ap.forEach(t),jr.forEach(t),ti=c(m),oe=s(m,"DIV",{class:!0});var wo=a(oe);f(it.$$.fragment,wo),oi=c(wo),gn=s(wo,"P",{});var Fp=a(gn);ni=n(Fp,"Tokenize and prepare for the model a list of sequences or a list of pairs of sequences."),Fp.forEach(t),ri=c(wo),f(Se.$$.fragment,wo),wo.forEach(t),si=c(m),ne=s(m,"DIV",{class:!0});var zo=a(ne);f(dt.$$.fragment,zo),ai=c(zo),kn=s(zo,"P",{});var Sp=a(kn);ii=n(Sp,`Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens.`),Sp.forEach(t),di=c(zo),bn=s(zo,"P",{});var Cp=a(bn);ci=n(Cp,"This implementation does not add special tokens and this method should be overridden in a subclass."),Cp.forEach(t),zo.forEach(t),li=c(m),Ce=s(m,"DIV",{class:!0});var Mr=a(Ce);f(ct.$$.fragment,Mr),pi=c(Mr),vn=s(Mr,"P",{});var Op=a(vn);mi=n(Op,"Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated 
forms."),Op.forEach(t),Mr.forEach(t),hi=c(m),Oe=s(m,"DIV",{class:!0});var Rr=a(Oe);f(lt.$$.fragment,Rr),ui=c(Rr),pt=s(Rr,"P",{});var Ur=a(pt);fi=n(Ur,"Converts a sequence of tokens in a single string. The most simple way to do it is "),Tn=s(Ur,"CODE",{});var Wp=a(Tn);_i=n(Wp,'" ".join(tokens)'),Wp.forEach(t),gi=n(Ur,` but we often want to remove sub-word tokenization artifacts at the same time.`),Ur.forEach(t),Rr.forEach(t),ki=c(m),re=s(m,"DIV",{class:!0});var xo=a(re);f(mt.$$.fragment,xo),bi=c(xo),uo=s(xo,"P",{});var Gl=a(uo);vi=n(Gl,"Create the token type IDs corresponding to the sequences passed. "),fo=s(Gl,"A",{href:!0});var jp=a(fo);Ti=n(jp,`What are token type IDs?`),jp.forEach(t),Gl.forEach(t),yi=c(xo),yn=s(xo,"P",{});var Mp=a(yn);wi=n(Mp,"Should be overridden in a subclass if the model has a special way of building those."),Mp.forEach(t),xo.forEach(t),zi=c(m),se=s(m,"DIV",{class:!0});var $o=a(se);f(ht.$$.fragment,$o),xi=c($o),wn=s($o,"P",{});var Rp=a(wn);$i=n(Rp,`Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.`),Rp.forEach(t),Pi=c($o),ut=s($o,"P",{});var Vr=a(ut);qi=n(Vr,"Similar to doing "),zn=s(Vr,"CODE",{});var Up=a(zn);Ei=n(Up,"self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))"),Up.forEach(t),Bi=n(Vr,"."),Vr.forEach(t),$o.forEach(t),Li=c(m),ae=s(m,"DIV",{class:!0});var Po=a(ae);f(ft.$$.fragment,Po),Di=c(Po),xn=s(Po,"P",{});var Vp=a(xn);Ii=n(Vp,"Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary."),Vp.forEach(t),Ni=c(Po),_t=s(Po,"P",{});var Hr=a(_t);Ai=n(Hr,"Same as doing "),$n=s(Hr,"CODE",{});var Hp=a($n);Fi=n(Hp,"self.convert_tokens_to_ids(self.tokenize(text))"),Hp.forEach(t),Si=n(Hr,"."),Hr.forEach(t),Po.forEach(t),Ci=c(m),ie=s(m,"DIV",{class:!0});var qo=a(ie);f(gt.$$.fragment,qo),Oi=c(qo),Pn=s(qo,"P",{});var Gp=a(Pn);Wi=n(Gp,"Tokenize and prepare for the model a sequence or a pair of 
sequences."),Gp.forEach(t),ji=c(qo),f(We.$$.fragment,qo),qo.forEach(t),Mi=c(m),C=s(m,"DIV",{class:!0});var he=a(C);f(kt.$$.fragment,he),Ri=c(he),bt=s(he,"P",{});var Gr=a(bt);Ui=n(Gr,"Instantiate a "),_o=s(Gr,"A",{href:!0});var Xp=a(_o);Vi=n(Xp,"PreTrainedTokenizerBase"),Xp.forEach(t),Hi=n(Gr,` (or a derived class) from a predefined tokenizer.`),Gr.forEach(t),Gi=c(he),f(je.$$.fragment,he),Xi=c(he),qn=s(he,"P",{});var Yp=a(qn);Yi=n(Yp,"Examples:"),Yp.forEach(t),Ji=c(he),f(vt.$$.fragment,he),he.forEach(t),Ki=c(m),Me=s(m,"DIV",{class:!0});var Xr=a(Me);f(Tt.$$.fragment,Xr),Qi=c(Xr),ve=s(Xr,"P",{});var Eo=a(ve);Zi=n(Eo,`Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `),En=s(Eo,"CODE",{});var Jp=a(En);ed=n(Jp,"prepare_for_model"),Jp.forEach(t),td=n(Eo," or "),Bn=s(Eo,"CODE",{});var Kp=a(Bn);od=n(Kp,"encode_plus"),Kp.forEach(t),nd=n(Eo," methods."),Eo.forEach(t),Xr.forEach(t),rd=c(m),de=s(m,"DIV",{class:!0});var Bo=a(de);f(yt.$$.fragment,Bo),sd=c(Bo),Ln=s(Bo,"P",{});var Qp=a(Ln);ad=n(Qp,"Returns the vocabulary as a dictionary of token to index."),Qp.forEach(t),id=c(Bo),ce=s(Bo,"P",{});var no=a(ce);Dn=s(no,"CODE",{});var Zp=a(Dn);dd=n(Zp,"tokenizer.get_vocab()[token]"),Zp.forEach(t),cd=n(no," is equivalent to "),In=s(no,"CODE",{});var em=a(In);ld=n(em,"tokenizer.convert_tokens_to_ids(token)"),em.forEach(t),pd=n(no," when "),Nn=s(no,"CODE",{});var tm=a(Nn);md=n(tm,"token"),tm.forEach(t),hd=n(no,` is in the vocab.`),no.forEach(t),Bo.forEach(t),ud=c(m),X=s(m,"DIV",{class:!0});var Qe=a(X);f(wt.$$.fragment,Qe),fd=c(Qe),An=s(Qe,"P",{});var om=a(An);_d=n(om,`Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length in the batch.`),om.forEach(t),gd=c(Qe),Q=s(Qe,"P",{});var Ze=a(Q);kd=n(Ze,"Padding side (left/right) padding token ids are defined at the tokenizer level (with "),Fn=s(Ze,"CODE",{});var 
nm=a(Fn);bd=n(nm,"self.padding_side"),nm.forEach(t),vd=n(Ze,`, `),Sn=s(Ze,"CODE",{});var rm=a(Sn);Td=n(rm,"self.pad_token_id"),rm.forEach(t),yd=n(Ze," and "),Cn=s(Ze,"CODE",{});var sm=a(Cn);wd=n(sm,"self.pad_token_type_id"),sm.forEach(t),zd=n(Ze,")"),Ze.forEach(t),xd=c(Qe),f(Re.$$.fragment,Qe),Qe.forEach(t),$d=c(m),Ue=s(m,"DIV",{class:!0});var Yr=a(Ue);f(zt.$$.fragment,Yr),Pd=c(Yr),M=s(Yr,"P",{});var ue=a(M);qd=n(ue,`Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for `),On=s(ue,"EM",{});var am=a(On);Ed=n(am,"pair_ids"),am.forEach(t),Bd=n(ue,` different than `),Wn=s(ue,"CODE",{});var im=a(Wn);Ld=n(im,"None"),im.forEach(t),Dd=n(ue," and "),jn=s(ue,"EM",{});var dm=a(jn);Id=n(dm,"truncation_strategy = longest_first"),dm.forEach(t),Nd=n(ue," or "),Mn=s(ue,"CODE",{});var cm=a(Mn);Ad=n(cm,"True"),cm.forEach(t),Fd=n(ue,`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error.`),ue.forEach(t),Yr.forEach(t),Sd=c(m),Ve=s(m,"DIV",{class:!0});var Jr=a(Ve);f(xt.$$.fragment,Jr),Cd=c(Jr),Rn=s(Jr,"P",{});var lm=a(Rn);Od=n(lm,"Prepare model inputs for translation. 
For best performance, translate one sentence at a time."),lm.forEach(t),Jr.forEach(t),Wd=c(m),Y=s(m,"DIV",{class:!0});var et=a(Y);f($t.$$.fragment,et),jd=c(et),Pt=s(et,"P",{});var Kr=a(Pt);Md=n(Kr,`Upload the tokenizer files to the \u{1F917} Model Hub while synchronizing a local clone of the repo in `),Un=s(Kr,"CODE",{});var pm=a(Un);Rd=n(pm,"repo_path_or_name"),pm.forEach(t),Ud=n(Kr,"."),Kr.forEach(t),Vd=c(et),Vn=s(et,"P",{});var mm=a(Vn);Hd=n(mm,"Examples:"),mm.forEach(t),Gd=c(et),f(qt.$$.fragment,et),et.forEach(t),Xd=c(m),le=s(m,"DIV",{class:!0});var Lo=a(le);f(Et.$$.fragment,Lo),Yd=c(Lo),Bt=s(Lo,"P",{});var Qr=a(Bt);Jd=n(Qr,`Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the library are already mapped with `),Hn=s(Qr,"CODE",{});var hm=a(Hn);Kd=n(hm,"AutoTokenizer"),hm.forEach(t),Qd=n(Qr,"."),Qr.forEach(t),Zd=c(Lo),f(He.$$.fragment,Lo),Lo.forEach(t),ec=c(m),J=s(m,"DIV",{class:!0});var tt=a(J);f(Lt.$$.fragment,tt),tc=c(tt),Gn=s(tt,"P",{});var um=a(Gn);oc=n(um,"Save the full tokenizer state."),um.forEach(t),nc=c(tt),Dt=s(tt,"P",{});var Zr=a(Dt);rc=n(Zr,`This method make sure the full tokenizer can then be re-loaded using the `),Xn=s(Zr,"CODE",{});var fm=a(Xn);sc=n(fm,"from_pretrained"),fm.forEach(t),ac=n(Zr," class method.."),Zr.forEach(t),ic=c(tt),It=s(tt,"P",{});var es=a(It);dc=n(es,`Warning,None This won\u2019t save modifications you may have applied to the tokenizer after the instantiation (for instance, modifying `),Yn=s(es,"CODE",{});var _m=a(Yn);cc=n(_m,"tokenizer.do_lower_case"),_m.forEach(t),lc=n(es," after creation)."),es.forEach(t),tt.forEach(t),pc=c(m),pe=s(m,"DIV",{class:!0});var Do=a(pe);f(Nt.$$.fragment,Do),mc=c(Do),Jn=s(Do,"P",{});var gm=a(Jn);hc=n(gm,"Save only the vocabulary of the tokenizer (vocabulary + added tokens)."),gm.forEach(t),uc=c(Do),At=s(Do,"P",{});var ts=a(At);fc=n(ts,`This method won\u2019t save the configuration and special token mappings of the tokenizer. 
Use `),Kn=s(ts,"CODE",{});var km=a(Kn);_c=n(km,"_save_pretrained()"),km.forEach(t),gc=n(ts,"to save the whole state of the tokenizer."),ts.forEach(t),Do.forEach(t),kc=c(m),Ge=s(m,"DIV",{class:!0});var os=a(Ge);f(Ft.$$.fragment,os),bc=c(os),St=s(os,"P",{});var ns=a(St);vc=n(ns,"Converts a string in a sequence of tokens, replacing unknown tokens with the "),Qn=s(ns,"CODE",{});var bm=a(Qn);Tc=n(bm,"unk_token"),bm.forEach(t),yc=n(ns,"."),ns.forEach(t),os.forEach(t),wc=c(m),Xe=s(m,"DIV",{class:!0});var rs=a(Xe);f(Ct.$$.fragment,rs),zc=c(rs),Zn=s(rs,"P",{});var vm=a(Zn);xc=n(vm,"Truncates a sequence pair in-place following the strategy."),vm.forEach(t),rs.forEach(t),m.forEach(t),Lr=c(i),Te=s(i,"H2",{class:!0});var ss=a(Te);Ye=s(ss,"A",{id:!0,class:!0,href:!0});var Tm=a(Ye);er=s(Tm,"SPAN",{});var ym=a(er);f(Ot.$$.fragment,ym),ym.forEach(t),Tm.forEach(t),$c=c(ss),tr=s(ss,"SPAN",{});var wm=a(tr);Pc=n(wm,"SpecialTokensMixin"),wm.forEach(t),ss.forEach(t),Dr=c(i),A=s(i,"DIV",{class:!0});var fe=a(A);f(Wt.$$.fragment,fe),qc=c(fe),ye=s(fe,"P",{});var Io=a(ye);Ec=n(Io,"A mixin derived by "),go=s(Io,"A",{href:!0});var zm=a(go);Bc=n(zm,"PreTrainedTokenizer"),zm.forEach(t),Lc=n(Io," and "),ko=s(Io,"A",{href:!0});var xm=a(ko);Dc=n(xm,"PreTrainedTokenizerFast"),xm.forEach(t),Ic=n(Io,` to handle specific behaviors related to special tokens. In particular, this class hold the attributes which can be used to directly access these special tokens in a model-independent manner and allow to set and update the special tokens.`),Io.forEach(t),Nc=c(fe),q=s(fe,"DIV",{class:!0});var L=a(q);f(jt.$$.fragment,L),Ac=c(L),or=s(L,"P",{});var $m=a(or);Fc=n($m,`Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. 
If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary).`),$m.forEach(t),Sc=c(L),nr=s(L,"P",{});var Pm=a(nr);Cc=n(Pm,`Note,None When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.`),Pm.forEach(t),Oc=c(L),Mt=s(L,"P",{});var as=a(Mt);Wc=n(as,"In order to do that, please use the "),bo=s(as,"A",{href:!0});var qm=a(bo);jc=n(qm,"resize_token_embeddings()"),qm.forEach(t),Mc=n(as," method."),as.forEach(t),Rc=c(L),Rt=s(L,"P",{});var is=a(Rt);Uc=n(is,"Using "),rr=s(is,"CODE",{});var Em=a(rr);Vc=n(Em,"add_special_tokens"),Em.forEach(t),Hc=n(is," will ensure your special tokens can be used in several ways:"),is.forEach(t),Gc=c(L),Ut=s(L,"UL",{});var ds=a(Ut);sr=s(ds,"LI",{});var Bm=a(sr);Xc=n(Bm,"Special tokens are carefully handled by the tokenizer (they are never split)."),Bm.forEach(t),Yc=c(ds),Vt=s(ds,"LI",{});var cs=a(Vt);Jc=n(cs,"You can easily refer to special tokens using tokenizer class attributes like "),ar=s(cs,"CODE",{});var Lm=a(ar);Kc=n(Lm,"tokenizer.cls_token"),Lm.forEach(t),Qc=n(cs,`. 
This makes it easy to develop model-agnostic training and fine-tuning scripts.`),cs.forEach(t),ds.forEach(t),Zc=c(L),R=s(L,"P",{});var _e=a(R);el=n(_e,`When possible, special tokens are already registered for provided pretrained models (for instance `),vo=s(_e,"A",{href:!0});var Dm=a(vo);tl=n(Dm,"BertTokenizer"),Dm.forEach(t),ol=c(_e),ir=s(_e,"CODE",{});var Im=a(ir);nl=n(Im,"cls_token"),Im.forEach(t),rl=n(_e," is already registered to be :obj"),dr=s(_e,"EM",{});var Nm=a(dr);sl=n(Nm,"\u2019[CLS]\u2019"),Nm.forEach(t),al=n(_e,` and XLM\u2019s one is also registered to be `),cr=s(_e,"CODE",{});var Am=a(cr);il=n(Am,"'</s>'"),Am.forEach(t),dl=n(_e,")."),_e.forEach(t),cl=c(L),lr=s(L,"P",{});var Fm=a(lr);ll=n(Fm,"Examples:"),Fm.forEach(t),pl=c(L),f(Ht.$$.fragment,L),L.forEach(t),ml=c(fe),N=s(fe,"DIV",{class:!0});var K=a(N);f(Gt.$$.fragment,K),hl=c(K),pr=s(K,"P",{});var Sm=a(pr);ul=n(Sm,`Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary.`),Sm.forEach(t),fl=c(K),mr=s(K,"P",{});var Cm=a(mr);_l=n(Cm,`Note,None When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.`),Cm.forEach(t),gl=c(K),Xt=s(K,"P",{});var ls=a(Xt);kl=n(ls,"In order to do that, please use the "),To=s(ls,"A",{href:!0});var Om=a(To);bl=n(Om,"resize_token_embeddings()"),Om.forEach(t),vl=n(ls," method."),ls.forEach(t),Tl=c(K),hr=s(K,"P",{});var Wm=a(hr);yl=n(Wm,"Examples:"),Wm.forEach(t),wl=c(K),f(Yt.$$.fragment,K),K.forEach(t),zl=c(fe),me=s(fe,"DIV",{class:!0});var No=a(me);f(Jt.$$.fragment,No),xl=c(No),we=s(No,"P",{});var Ao=a(we);$l=n(Ao,"Make sure that all the special tokens attributes of the tokenizer ("),ur=s(Ao,"CODE",{});var jm=a(ur);Pl=n(jm,"tokenizer.mask_token"),jm.forEach(t),ql=n(Ao,`, `),fr=s(Ao,"CODE",{});var 
Mm=a(fr);El=n(Mm,"tokenizer.cls_token"),Mm.forEach(t),Bl=n(Ao,", etc.) are in the vocabulary."),Ao.forEach(t),Ll=c(No),_r=s(No,"P",{});var Rm=a(_r);Dl=n(Rm,"Add the missing ones to the vocabulary if needed."),Rm.forEach(t),No.forEach(t),fe.forEach(t),Ir=c(i),ze=s(i,"H2",{class:!0});var ps=a(ze);Je=s(ps,"A",{id:!0,class:!0,href:!0});var Um=a(Je);gr=s(Um,"SPAN",{});var Vm=a(gr);f(Kt.$$.fragment,Vm),Vm.forEach(t),Um.forEach(t),Il=c(ps),kr=s(ps,"SPAN",{});var Hm=a(kr);Nl=n(Hm,"Enums and namedtuples"),Hm.forEach(t),ps.forEach(t),Nr=c(i),xe=s(i,"DIV",{class:!0});var ms=a(xe);f(Qt.$$.fragment,ms),Al=c(ms),$e=s(ms,"P",{});var Fo=a($e);Fl=n(Fo,"Possible values for the "),br=s(Fo,"CODE",{});var Gm=a(br);Sl=n(Gm,"truncation"),Gm.forEach(t),Cl=n(Fo," argument in "),Ke=s(Fo,"A",{href:!0});var hs=a(Ke);Ol=n(hs,"PreTrainedTokenizerBase."),vr=s(hs,"STRONG",{});var Xm=a(vr);Wl=n(Xm,"call"),Xm.forEach(t),jl=n(hs,"()"),hs.forEach(t),Ml=n(Fo,`. Useful for tab-completion in an IDE.`),Fo.forEach(t),ms.forEach(t),Ar=c(i),Pe=s(i,"DIV",{class:!0});var us=a(Pe);f(Zt.$$.fragment,us),Rl=c(us),Tr=s(us,"P",{});var Ym=a(Tr);Ul=n(Ym,"Character span in the original string."),Ym.forEach(t),us.forEach(t),Fr=c(i),qe=s(i,"DIV",{class:!0});var fs=a(qe);f(eo.$$.fragment,fs),Vl=c(fs),yr=s(fs,"P",{});var Jm=a(yr);Hl=n(Jm,"Token span in an encoded string (list of tokens)."),Jm.forEach(t),fs.forEach(t),this.h()},h(){l(h,"name","hf:doc:metadata"),l(h,"content",JSON.stringify(ih)),l(y,"id","utilities-for-tokenizers"),l(y,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(y,"href","#utilities-for-tokenizers"),l(v,"class","relative 
group"),l(so,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),l(ao,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(io,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(co,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.SpecialTokensMixin"),l(De,"id","transformers.PreTrainedTokenizerBase"),l(De,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(De,"href","#transformers.PreTrainedTokenizerBase"),l(ke,"class","relative group"),l(po,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(mo,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(ho,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained"),l(Ne,"class","docstring"),l(Ae,"class","docstring"),l(Fe,"class","docstring"),l(oe,"class","docstring"),l(ne,"class","docstring"),l(Ce,"class","docstring"),l(Oe,"class","docstring"),l(fo,"href","../glossary#token-type-ids"),l(re,"class","docstring"),l(se,"class","docstring"),l(ae,"class","docstring"),l(ie,"class","docstring"),l(_o,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase"),l(C,"class","docstring"),l(Me,"class","docstring"),l(de,"class","docstring"),l(X,"class","docstring"),l(Ue,"class","docstring"),l(Ve,"class","docstring"),l(Y,"class","docstring"),l(le,"class","docstring"),l(J,"class","docstring"),l(pe,"class","docstring"),l(Ge,"class","docstring"),l(Xe,"class","docstring"),l(p,"class","docstring"),l(Ye,"id","transformers.SpecialTokensMixin"),l(Ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Ye,"href","#transformers.SpecialTokensMixin"),l(Te,"class","relative group"),l(go,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer"),l(ko,"href","/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast"),l(bo,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.resize_token_embeddings"),l(vo,"href","/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer"),l(q,"class","docstring"),l(To,"href","/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.resize_token_embeddings"),l(N,"class","docstring"),l(me,"class","docstring"),l(A,"class","docstring"),l(Je,"id","transformers.tokenization_utils_base.TruncationStrategy"),l(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),l(Je,"href","#transformers.tokenization_utils_base.TruncationStrategy"),l(ze,"class","relative 
group"),l(Ke,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),l(xe,"class","docstring"),l(Pe,"class","docstring"),l(qe,"class","docstring")},m(i,T){e(document.head,h),$(i,P,T),$(i,v,T),e(v,y),e(y,E),_(w,E,null),e(v,x),e(v,B),e(B,ge),$(i,j,T),$(i,D,T),e(D,ro),e(D,so),e(so,ks),e(D,bs),e(D,ao),e(ao,vs),e(D,Ts),e(D,io),e(io,ys),e(D,ws),e(D,co),e(co,zs),e(D,xs),$(i,qr,T),$(i,lo,T),e(lo,$s),$(i,Er,T),$(i,ke,T),e(ke,De),e(De,So),_(ot,So,null),e(ke,Ps),e(ke,Co),e(Co,qs),$(i,Br,T),$(i,p,T),_(nt,p,null),e(p,Es),e(p,be),e(be,Bs),e(be,po),e(po,Ls),e(be,Ds),e(be,mo),e(mo,Is),e(be,Ns),e(p,As),e(p,Oo),e(Oo,Fs),e(p,Ss),e(p,Wo),e(Wo,Cs),e(p,Os),e(p,I),e(I,te),e(te,jo),e(jo,Ws),e(te,js),e(te,Mo),e(Mo,Ms),e(te,Rs),e(te,Ro),e(Ro,Us),e(te,Vs),e(I,Hs),e(I,F),e(F,Uo),e(Uo,Gs),e(F,Xs),e(F,Vo),e(Vo,Ys),e(F,Js),e(F,Ho),e(Ho,Ks),e(F,Qs),e(F,Go),e(Go,Zs),e(F,ea),e(F,Xo),e(Xo,ta),e(F,oa),e(I,na),e(I,V),e(V,Yo),e(Yo,ra),e(V,sa),e(V,Jo),e(Jo,aa),e(V,ia),e(V,Ko),e(Ko,da),e(V,ca),e(V,Qo),e(Qo,la),e(V,pa),e(I,ma),e(I,S),e(S,Zo),e(Zo,ha),e(S,ua),e(S,en),e(en,fa),e(S,_a),e(S,tn),e(tn,ga),e(S,ka),e(S,on),e(on,ba),e(S,va),e(S,ho),e(ho,Ta),e(S,ya),e(I,wa),e(I,Ie),e(Ie,nn),e(nn,za),e(Ie,xa),e(Ie,rn),e(rn,$a),e(Ie,Pa),e(I,qa),e(I,H),e(H,sn),e(sn,Ea),e(H,Ba),e(H,an),e(an,La),e(H,Da),e(H,dn),e(dn,Ia),e(H,Na),e(H,cn),e(cn,Aa),e(H,Fa),e(I,Sa),e(I,G),e(G,ln),e(ln,Ca),e(G,Oa),e(G,pn),e(pn,Wa),e(G,ja),e(G,mn),e(mn,Ma),e(G,Ra),e(G,hn),e(hn,Ua),e(G,Va),e(p,Ha),e(p,Ne),_(rt,Ne,null),e(Ne,Ga),e(Ne,un),e(un,Xa),e(p,Ya),e(p,Ae),_(st,Ae,null),e(Ae,Ja),e(Ae,fn),e(fn,Ka),e(p,Qa),e(p,Fe),_(at,Fe,null),e(Fe,Za),e(Fe,_n),e(_n,ei),e(p,ti),e(p,oe),_(it,oe,null),e(oe,oi),e(oe,gn),e(gn,ni),e(oe,ri),_(Se,oe,null),e(p,si),e(p,ne),_(dt,ne,null),e(ne,ai),e(ne,kn),e(kn,ii),e(ne,di),e(ne,bn),e(bn,ci),e(p,li),e(p,Ce),_(ct,Ce,null),e(Ce,pi),e(Ce,vn),e(vn,mi),e(p,hi),e(p,Oe),_(lt,Oe,null),e(Oe,ui),e(Oe,pt),e(pt,fi),e(pt,Tn),e(Tn,_i),e(pt,gi),e(p,ki),e(p,re),_(mt,re,null),e
(re,bi),e(re,uo),e(uo,vi),e(uo,fo),e(fo,Ti),e(re,yi),e(re,yn),e(yn,wi),e(p,zi),e(p,se),_(ht,se,null),e(se,xi),e(se,wn),e(wn,$i),e(se,Pi),e(se,ut),e(ut,qi),e(ut,zn),e(zn,Ei),e(ut,Bi),e(p,Li),e(p,ae),_(ft,ae,null),e(ae,Di),e(ae,xn),e(xn,Ii),e(ae,Ni),e(ae,_t),e(_t,Ai),e(_t,$n),e($n,Fi),e(_t,Si),e(p,Ci),e(p,ie),_(gt,ie,null),e(ie,Oi),e(ie,Pn),e(Pn,Wi),e(ie,ji),_(We,ie,null),e(p,Mi),e(p,C),_(kt,C,null),e(C,Ri),e(C,bt),e(bt,Ui),e(bt,_o),e(_o,Vi),e(bt,Hi),e(C,Gi),_(je,C,null),e(C,Xi),e(C,qn),e(qn,Yi),e(C,Ji),_(vt,C,null),e(p,Ki),e(p,Me),_(Tt,Me,null),e(Me,Qi),e(Me,ve),e(ve,Zi),e(ve,En),e(En,ed),e(ve,td),e(ve,Bn),e(Bn,od),e(ve,nd),e(p,rd),e(p,de),_(yt,de,null),e(de,sd),e(de,Ln),e(Ln,ad),e(de,id),e(de,ce),e(ce,Dn),e(Dn,dd),e(ce,cd),e(ce,In),e(In,ld),e(ce,pd),e(ce,Nn),e(Nn,md),e(ce,hd),e(p,ud),e(p,X),_(wt,X,null),e(X,fd),e(X,An),e(An,_d),e(X,gd),e(X,Q),e(Q,kd),e(Q,Fn),e(Fn,bd),e(Q,vd),e(Q,Sn),e(Sn,Td),e(Q,yd),e(Q,Cn),e(Cn,wd),e(Q,zd),e(X,xd),_(Re,X,null),e(p,$d),e(p,Ue),_(zt,Ue,null),e(Ue,Pd),e(Ue,M),e(M,qd),e(M,On),e(On,Ed),e(M,Bd),e(M,Wn),e(Wn,Ld),e(M,Dd),e(M,jn),e(jn,Id),e(M,Nd),e(M,Mn),e(Mn,Ad),e(M,Fd),e(p,Sd),e(p,Ve),_(xt,Ve,null),e(Ve,Cd),e(Ve,Rn),e(Rn,Od),e(p,Wd),e(p,Y),_($t,Y,null),e(Y,jd),e(Y,Pt),e(Pt,Md),e(Pt,Un),e(Un,Rd),e(Pt,Ud),e(Y,Vd),e(Y,Vn),e(Vn,Hd),e(Y,Gd),_(qt,Y,null),e(p,Xd),e(p,le),_(Et,le,null),e(le,Yd),e(le,Bt),e(Bt,Jd),e(Bt,Hn),e(Hn,Kd),e(Bt,Qd),e(le,Zd),_(He,le,null),e(p,ec),e(p,J),_(Lt,J,null),e(J,tc),e(J,Gn),e(Gn,oc),e(J,nc),e(J,Dt),e(Dt,rc),e(Dt,Xn),e(Xn,sc),e(Dt,ac),e(J,ic),e(J,It),e(It,dc),e(It,Yn),e(Yn,cc),e(It,lc),e(p,pc),e(p,pe),_(Nt,pe,null),e(pe,mc),e(pe,Jn),e(Jn,hc),e(pe,uc),e(pe,At),e(At,fc),e(At,Kn),e(Kn,_c),e(At,gc),e(p,kc),e(p,Ge),_(Ft,Ge,null),e(Ge,bc),e(Ge,St),e(St,vc),e(St,Qn),e(Qn,Tc),e(St,yc),e(p,wc),e(p,Xe),_(Ct,Xe,null),e(Xe,zc),e(Xe,Zn),e(Zn,xc),$(i,Lr,T),$(i,Te,T),e(Te,Ye),e(Ye,er),_(Ot,er,null),e(Te,$c),e(Te,tr),e(tr,Pc),$(i,Dr,T),$(i,A,T),_(Wt,A,null),e(A,qc),e(A,ye),e(ye,Ec),e(ye,go),e(go,Bc),e(ye,Lc),e(ye,ko),e(ko,Dc),e(ye,I
c),e(A,Nc),e(A,q),_(jt,q,null),e(q,Ac),e(q,or),e(or,Fc),e(q,Sc),e(q,nr),e(nr,Cc),e(q,Oc),e(q,Mt),e(Mt,Wc),e(Mt,bo),e(bo,jc),e(Mt,Mc),e(q,Rc),e(q,Rt),e(Rt,Uc),e(Rt,rr),e(rr,Vc),e(Rt,Hc),e(q,Gc),e(q,Ut),e(Ut,sr),e(sr,Xc),e(Ut,Yc),e(Ut,Vt),e(Vt,Jc),e(Vt,ar),e(ar,Kc),e(Vt,Qc),e(q,Zc),e(q,R),e(R,el),e(R,vo),e(vo,tl),e(R,ol),e(R,ir),e(ir,nl),e(R,rl),e(R,dr),e(dr,sl),e(R,al),e(R,cr),e(cr,il),e(R,dl),e(q,cl),e(q,lr),e(lr,ll),e(q,pl),_(Ht,q,null),e(A,ml),e(A,N),_(Gt,N,null),e(N,hl),e(N,pr),e(pr,ul),e(N,fl),e(N,mr),e(mr,_l),e(N,gl),e(N,Xt),e(Xt,kl),e(Xt,To),e(To,bl),e(Xt,vl),e(N,Tl),e(N,hr),e(hr,yl),e(N,wl),_(Yt,N,null),e(A,zl),e(A,me),_(Jt,me,null),e(me,xl),e(me,we),e(we,$l),e(we,ur),e(ur,Pl),e(we,ql),e(we,fr),e(fr,El),e(we,Bl),e(me,Ll),e(me,_r),e(_r,Dl),$(i,Ir,T),$(i,ze,T),e(ze,Je),e(Je,gr),_(Kt,gr,null),e(ze,Il),e(ze,kr),e(kr,Nl),$(i,Nr,T),$(i,xe,T),_(Qt,xe,null),e(xe,Al),e(xe,$e),e($e,Fl),e($e,br),e(br,Sl),e($e,Cl),e($e,Ke),e(Ke,Ol),e(Ke,vr),e(vr,Wl),e(Ke,jl),e($e,Ml),$(i,Ar,T),$(i,Pe,T),_(Zt,Pe,null),e(Pe,Rl),e(Pe,Tr),e(Tr,Ul),$(i,Fr,T),$(i,qe,T),_(eo,qe,null),e(qe,Vl),e(qe,yr),e(yr,Hl),Sr=!0},p(i,[T]){const to={};T&2&&(to.$$scope={dirty:T,ctx:i}),Se.$set(to);const wr={};T&2&&(wr.$$scope={dirty:T,ctx:i}),We.$set(wr);const zr={};T&2&&(zr.$$scope={dirty:T,ctx:i}),je.$set(zr);const xr={};T&2&&(xr.$$scope={dirty:T,ctx:i}),Re.$set(xr);const 
U={};T&2&&(U.$$scope={dirty:T,ctx:i}),He.$set(U)},i(i){Sr||(g(w.$$.fragment,i),g(ot.$$.fragment,i),g(nt.$$.fragment,i),g(rt.$$.fragment,i),g(st.$$.fragment,i),g(at.$$.fragment,i),g(it.$$.fragment,i),g(Se.$$.fragment,i),g(dt.$$.fragment,i),g(ct.$$.fragment,i),g(lt.$$.fragment,i),g(mt.$$.fragment,i),g(ht.$$.fragment,i),g(ft.$$.fragment,i),g(gt.$$.fragment,i),g(We.$$.fragment,i),g(kt.$$.fragment,i),g(je.$$.fragment,i),g(vt.$$.fragment,i),g(Tt.$$.fragment,i),g(yt.$$.fragment,i),g(wt.$$.fragment,i),g(Re.$$.fragment,i),g(zt.$$.fragment,i),g(xt.$$.fragment,i),g($t.$$.fragment,i),g(qt.$$.fragment,i),g(Et.$$.fragment,i),g(He.$$.fragment,i),g(Lt.$$.fragment,i),g(Nt.$$.fragment,i),g(Ft.$$.fragment,i),g(Ct.$$.fragment,i),g(Ot.$$.fragment,i),g(Wt.$$.fragment,i),g(jt.$$.fragment,i),g(Ht.$$.fragment,i),g(Gt.$$.fragment,i),g(Yt.$$.fragment,i),g(Jt.$$.fragment,i),g(Kt.$$.fragment,i),g(Qt.$$.fragment,i),g(Zt.$$.fragment,i),g(eo.$$.fragment,i),Sr=!0)},o(i){k(w.$$.fragment,i),k(ot.$$.fragment,i),k(nt.$$.fragment,i),k(rt.$$.fragment,i),k(st.$$.fragment,i),k(at.$$.fragment,i),k(it.$$.fragment,i),k(Se.$$.fragment,i),k(dt.$$.fragment,i),k(ct.$$.fragment,i),k(lt.$$.fragment,i),k(mt.$$.fragment,i),k(ht.$$.fragment,i),k(ft.$$.fragment,i),k(gt.$$.fragment,i),k(We.$$.fragment,i),k(kt.$$.fragment,i),k(je.$$.fragment,i),k(vt.$$.fragment,i),k(Tt.$$.fragment,i),k(yt.$$.fragment,i),k(wt.$$.fragment,i),k(Re.$$.fragment,i),k(zt.$$.fragment,i),k(xt.$$.fragment,i),k($t.$$.fragment,i),k(qt.$$.fragment,i),k(Et.$$.fragment,i),k(He.$$.fragment,i),k(Lt.$$.fragment,i),k(Nt.$$.fragment,i),k(Ft.$$.fragment,i),k(Ct.$$.fragment,i),k(Ot.$$.fragment,i),k(Wt.$$.fragment,i),k(jt.$$.fragment,i),k(Ht.$$.fragment,i),k(Gt.$$.fragment,i),k(Yt.$$.fragment,i),k(Jt.$$.fragment,i),k(Kt.$$.fragment,i),k(Qt.$$.fragment,i),k(Zt.$$.fragment,i),k(eo.$$.fragment,i),Sr=!1},d(i){t(h),i&&t(P),i&&t(v),b(w),i&&t(j),i&&t(D),i&&t(qr),i&&t(lo),i&&t(Er),i&&t(ke),b(ot),i&&t(Br),i&&t(p),b(nt),b(rt),b(st),b(at),b(it),b(Se),b(dt),b(ct),b(lt),b(
mt),b(ht),b(ft),b(gt),b(We),b(kt),b(je),b(vt),b(Tt),b(yt),b(wt),b(Re),b(zt),b(xt),b($t),b(qt),b(Et),b(He),b(Lt),b(Nt),b(Ft),b(Ct),i&&t(Lr),i&&t(Te),b(Ot),i&&t(Dr),i&&t(A),b(Wt),b(jt),b(Ht),b(Gt),b(Yt),b(Jt),i&&t(Ir),i&&t(ze),b(Kt),i&&t(Nr),i&&t(xe),b(Qt),i&&t(Ar),i&&t(Pe),b(Zt),i&&t(Fr),i&&t(qe),b(eo)}}}const ih={local:"utilities-for-tokenizers",sections:[{local:"transformers.PreTrainedTokenizerBase",title:"PreTrainedTokenizerBase"},{local:"transformers.SpecialTokensMixin",title:"SpecialTokensMixin"},{local:"transformers.tokenization_utils_base.TruncationStrategy",title:"Enums and namedtuples"}],title:"Utilities for Tokenizers"};function dh(W,h,P){let{fw:v}=h;return W.$$set=y=>{"fw"in y&&P(0,v=y.fw)},[v]}class fh extends Km{constructor(h){super();Qm(this,h,dh,ah,Zm,{fw:0})}}export{fh as default,ih as metadata};
422
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_16143/en/_app/pages/internal/file_utils.mdx-23310578.js
import{S as kr,i as Dr,s as Sr,e as s,k as f,w as u,t as p,M as Tr,c as a,d as t,m,a as n,x as d,h as c,b as o,F as r,g as i,y as h,L as xr,q as _,o as v,B as g}from"../../chunks/vendor-4833417e.js";import{D as w}from"../../chunks/Docstring-4f315ed9.js";import{I as Le}from"../../chunks/IconCopyLink-4b81c553.js";function Ar(it){let b,ie,y,E,ce,C,ft,ue,mt,Ie,z,pt,de,ct,ut,ze,fe,dt,Me,N,M,he,G,ht,_e,_t,Ue,P,H,vt,ve,gt,Ve,k,F,$t,D,yt,ge,Et,bt,U,wt,$e,Nt,Pt,kt,Be,S,R,Dt,T,St,ye,Tt,xt,V,At,Ee,Lt,It,zt,Oe,x,B,be,J,Mt,we,Ut,qe,K,Q,je,W,X,Ce,Y,Z,Ge,ee,te,He,re,se,Fe,A,O,Ne,ae,Vt,Pe,Bt,Re,$,ne,Ot,ke,qt,jt,De,Ct,Gt,Se,Ht,Je,L,q,Te,le,Ft,xe,Rt,Ke,I,oe,Jt,Ae,Kt,Qe;return C=new Le({}),G=new Le({}),H=new w({props:{name:"class transformers.file_utils.ExplicitEnum",anchor:"transformers.file_utils.ExplicitEnum",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2710"}}),F=new w({props:{name:"class transformers.file_utils.PaddingStrategy",anchor:"transformers.file_utils.PaddingStrategy",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2722"}}),R=new w({props:{name:"class transformers.TensorType",anchor:"transformers.TensorType",parameters:[{name:"value",val:""},{name:"names",val:" = None"},{name:"module",val:" = None"},{name:"qualname",val:" = None"},{name:"type",val:" = None"},{name:"start",val:" = 1"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2733"}}),J=new Le({}),Q=new 
w({props:{name:"transformers.add_start_docstrings",anchor:"transformers.add_start_docstrings",parameters:[{name:"*docstr",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L863"}}),X=new w({props:{name:"transformers.file_utils.add_start_docstrings_to_model_forward",anchor:"transformers.file_utils.add_start_docstrings_to_model_forward",parameters:[{name:"*docstr",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L871"}}),Z=new w({props:{name:"transformers.add_end_docstrings",anchor:"transformers.add_end_docstrings",parameters:[{name:"*docstr",val:""}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L893"}}),te=new w({props:{name:"transformers.file_utils.add_code_sample_docstrings",anchor:"transformers.file_utils.add_code_sample_docstrings",parameters:[{name:"*docstr",val:""},{name:"processor_class",val:" = None"},{name:"checkpoint",val:" = None"},{name:"output_type",val:" = None"},{name:"config_class",val:" = None"},{name:"mask",val:" = '[MASK]'"},{name:"model_cls",val:" = None"},{name:"modality",val:" = None"},{name:"expected_output",val:" = ''"},{name:"expected_loss",val:" = ''"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L1673"}}),se=new w({props:{name:"transformers.file_utils.replace_return_docstrings",anchor:"transformers.file_utils.replace_return_docstrings",parameters:[{name:"output_type",val:" = None"},{name:"config_class",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L1748"}}),ae=new Le({}),ne=new w({props:{name:"class transformers.file_utils.cached_property",anchor:"transformers.file_utils.cached_property",parameters:[{name:"fget",val:" = None"},{name:"fset",val:" = None"},{name:"fdel",val:" = None"},{name:"doc",val:" = 
None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2466"}}),le=new Le({}),oe=new w({props:{name:"class transformers._LazyModule",anchor:"transformers._LazyModule",parameters:[{name:"name",val:""},{name:"module_file",val:""},{name:"import_structure",val:""},{name:"module_spec",val:" = None"},{name:"extra_objects",val:" = None"}],source:"https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2745"}}),{c(){b=s("meta"),ie=f(),y=s("h1"),E=s("a"),ce=s("span"),u(C.$$.fragment),ft=f(),ue=s("span"),mt=p("General Utilities"),Ie=f(),z=s("p"),pt=p("This page lists all of Transformers general utility functions that are found in the file "),de=s("code"),ct=p("file_utils.py"),ut=p("."),ze=f(),fe=s("p"),dt=p("Most of those are only useful if you are studying the general code in the library."),Me=f(),N=s("h2"),M=s("a"),he=s("span"),u(G.$$.fragment),ht=f(),_e=s("span"),_t=p("Enums and namedtuples"),Ue=f(),P=s("div"),u(H.$$.fragment),vt=f(),ve=s("p"),gt=p("Enum with more explicit error message for missing values."),Ve=f(),k=s("div"),u(F.$$.fragment),$t=f(),D=s("p"),yt=p("Possible values for the "),ge=s("code"),Et=p("padding"),bt=p(" argument in "),U=s("a"),wt=p("PreTrainedTokenizerBase."),$e=s("strong"),Nt=p("call"),Pt=p("()"),kt=p(`. Useful for tab-completion in an IDE.`),Be=f(),S=s("div"),u(R.$$.fragment),Dt=f(),T=s("p"),St=p("Possible values for the "),ye=s("code"),Tt=p("return_tensors"),xt=p(" argument in "),V=s("a"),At=p("PreTrainedTokenizerBase."),Ee=s("strong"),Lt=p("call"),It=p("()"),zt=p(`. 
Useful for tab-completion in an IDE.`),Oe=f(),x=s("h2"),B=s("a"),be=s("span"),u(J.$$.fragment),Mt=f(),we=s("span"),Ut=p("Special Decorators"),qe=f(),K=s("div"),u(Q.$$.fragment),je=f(),W=s("div"),u(X.$$.fragment),Ce=f(),Y=s("div"),u(Z.$$.fragment),Ge=f(),ee=s("div"),u(te.$$.fragment),He=f(),re=s("div"),u(se.$$.fragment),Fe=f(),A=s("h2"),O=s("a"),Ne=s("span"),u(ae.$$.fragment),Vt=f(),Pe=s("span"),Bt=p("Special Properties"),Re=f(),$=s("div"),u(ne.$$.fragment),Ot=f(),ke=s("p"),qt=p("Descriptor that mimics @property but caches output in member variable."),jt=f(),De=s("p"),Ct=p("From tensorflow_datasets"),Gt=f(),Se=s("p"),Ht=p("Built-in in functools from Python 3.8."),Je=f(),L=s("h2"),q=s("a"),Te=s("span"),u(le.$$.fragment),Ft=f(),xe=s("span"),Rt=p("Other Utilities"),Ke=f(),I=s("div"),u(oe.$$.fragment),Jt=f(),Ae=s("p"),Kt=p("Module class that surfaces all objects but only performs associated imports when the objects are requested."),this.h()},l(e){const l=Tr('[data-svelte="svelte-1phssyn"]',document.head);b=a(l,"META",{name:!0,content:!0}),l.forEach(t),ie=m(e),y=a(e,"H1",{class:!0});var We=n(y);E=a(We,"A",{id:!0,class:!0,href:!0});var Qt=n(E);ce=a(Qt,"SPAN",{});var Wt=n(ce);d(C.$$.fragment,Wt),Wt.forEach(t),Qt.forEach(t),ft=m(We),ue=a(We,"SPAN",{});var Xt=n(ue);mt=c(Xt,"General Utilities"),Xt.forEach(t),We.forEach(t),Ie=m(e),z=a(e,"P",{});var Xe=n(z);pt=c(Xe,"This page lists all of Transformers general utility functions that are found in the file "),de=a(Xe,"CODE",{});var Yt=n(de);ct=c(Yt,"file_utils.py"),Yt.forEach(t),ut=c(Xe,"."),Xe.forEach(t),ze=m(e),fe=a(e,"P",{});var Zt=n(fe);dt=c(Zt,"Most of those are only useful if you are studying the general code in the library."),Zt.forEach(t),Me=m(e),N=a(e,"H2",{class:!0});var Ye=n(N);M=a(Ye,"A",{id:!0,class:!0,href:!0});var er=n(M);he=a(er,"SPAN",{});var tr=n(he);d(G.$$.fragment,tr),tr.forEach(t),er.forEach(t),ht=m(Ye),_e=a(Ye,"SPAN",{});var rr=n(_e);_t=c(rr,"Enums and 
namedtuples"),rr.forEach(t),Ye.forEach(t),Ue=m(e),P=a(e,"DIV",{class:!0});var Ze=n(P);d(H.$$.fragment,Ze),vt=m(Ze),ve=a(Ze,"P",{});var sr=n(ve);gt=c(sr,"Enum with more explicit error message for missing values."),sr.forEach(t),Ze.forEach(t),Ve=m(e),k=a(e,"DIV",{class:!0});var et=n(k);d(F.$$.fragment,et),$t=m(et),D=a(et,"P",{});var me=n(D);yt=c(me,"Possible values for the "),ge=a(me,"CODE",{});var ar=n(ge);Et=c(ar,"padding"),ar.forEach(t),bt=c(me," argument in "),U=a(me,"A",{href:!0});var tt=n(U);wt=c(tt,"PreTrainedTokenizerBase."),$e=a(tt,"STRONG",{});var nr=n($e);Nt=c(nr,"call"),nr.forEach(t),Pt=c(tt,"()"),tt.forEach(t),kt=c(me,`. Useful for tab-completion in an IDE.`),me.forEach(t),et.forEach(t),Be=m(e),S=a(e,"DIV",{class:!0});var rt=n(S);d(R.$$.fragment,rt),Dt=m(rt),T=a(rt,"P",{});var pe=n(T);St=c(pe,"Possible values for the "),ye=a(pe,"CODE",{});var lr=n(ye);Tt=c(lr,"return_tensors"),lr.forEach(t),xt=c(pe," argument in "),V=a(pe,"A",{href:!0});var st=n(V);At=c(st,"PreTrainedTokenizerBase."),Ee=a(st,"STRONG",{});var or=n(Ee);Lt=c(or,"call"),or.forEach(t),It=c(st,"()"),st.forEach(t),zt=c(pe,`. 
Useful for tab-completion in an IDE.`),pe.forEach(t),rt.forEach(t),Oe=m(e),x=a(e,"H2",{class:!0});var at=n(x);B=a(at,"A",{id:!0,class:!0,href:!0});var ir=n(B);be=a(ir,"SPAN",{});var fr=n(be);d(J.$$.fragment,fr),fr.forEach(t),ir.forEach(t),Mt=m(at),we=a(at,"SPAN",{});var mr=n(we);Ut=c(mr,"Special Decorators"),mr.forEach(t),at.forEach(t),qe=m(e),K=a(e,"DIV",{class:!0});var pr=n(K);d(Q.$$.fragment,pr),pr.forEach(t),je=m(e),W=a(e,"DIV",{class:!0});var cr=n(W);d(X.$$.fragment,cr),cr.forEach(t),Ce=m(e),Y=a(e,"DIV",{class:!0});var ur=n(Y);d(Z.$$.fragment,ur),ur.forEach(t),Ge=m(e),ee=a(e,"DIV",{class:!0});var dr=n(ee);d(te.$$.fragment,dr),dr.forEach(t),He=m(e),re=a(e,"DIV",{class:!0});var hr=n(re);d(se.$$.fragment,hr),hr.forEach(t),Fe=m(e),A=a(e,"H2",{class:!0});var nt=n(A);O=a(nt,"A",{id:!0,class:!0,href:!0});var _r=n(O);Ne=a(_r,"SPAN",{});var vr=n(Ne);d(ae.$$.fragment,vr),vr.forEach(t),_r.forEach(t),Vt=m(nt),Pe=a(nt,"SPAN",{});var gr=n(Pe);Bt=c(gr,"Special Properties"),gr.forEach(t),nt.forEach(t),Re=m(e),$=a(e,"DIV",{class:!0});var j=n($);d(ne.$$.fragment,j),Ot=m(j),ke=a(j,"P",{});var $r=n(ke);qt=c($r,"Descriptor that mimics @property but caches output in member variable."),$r.forEach(t),jt=m(j),De=a(j,"P",{});var yr=n(De);Ct=c(yr,"From tensorflow_datasets"),yr.forEach(t),Gt=m(j),Se=a(j,"P",{});var Er=n(Se);Ht=c(Er,"Built-in in functools from Python 3.8."),Er.forEach(t),j.forEach(t),Je=m(e),L=a(e,"H2",{class:!0});var lt=n(L);q=a(lt,"A",{id:!0,class:!0,href:!0});var br=n(q);Te=a(br,"SPAN",{});var wr=n(Te);d(le.$$.fragment,wr),wr.forEach(t),br.forEach(t),Ft=m(lt),xe=a(lt,"SPAN",{});var Nr=n(xe);Rt=c(Nr,"Other Utilities"),Nr.forEach(t),lt.forEach(t),Ke=m(e),I=a(e,"DIV",{class:!0});var ot=n(I);d(oe.$$.fragment,ot),Jt=m(ot),Ae=a(ot,"P",{});var Pr=n(Ae);Kt=c(Pr,"Module class that surfaces all objects but only performs associated imports when the objects are 
requested."),Pr.forEach(t),ot.forEach(t),this.h()},h(){o(b,"name","hf:doc:metadata"),o(b,"content",JSON.stringify(Lr)),o(E,"id","general-utilities"),o(E,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),o(E,"href","#general-utilities"),o(y,"class","relative group"),o(M,"id","transformers.file_utils.ExplicitEnum"),o(M,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),o(M,"href","#transformers.file_utils.ExplicitEnum"),o(N,"class","relative group"),o(P,"class","docstring"),o(U,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),o(k,"class","docstring"),o(V,"href","/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"),o(S,"class","docstring"),o(B,"id","transformers.add_start_docstrings"),o(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),o(B,"href","#transformers.add_start_docstrings"),o(x,"class","relative group"),o(K,"class","docstring"),o(W,"class","docstring"),o(Y,"class","docstring"),o(ee,"class","docstring"),o(re,"class","docstring"),o(O,"id","transformers.file_utils.cached_property"),o(O,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),o(O,"href","#transformers.file_utils.cached_property"),o(A,"class","relative group"),o($,"class","docstring"),o(q,"id","transformers._LazyModule"),o(q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),o(q,"href","#transformers._LazyModule"),o(L,"class","relative group"),o(I,"class","docstring")},m(e,l){r(document.head,b),i(e,ie,l),i(e,y,l),r(y,E),r(E,ce),h(C,ce,null),r(y,ft),r(y,ue),r(ue,mt),i(e,Ie,l),i(e,z,l),r(z,pt),r(z,de),r(de,ct),r(z,ut),i(e,ze,l),i(e,fe,l),r(fe,dt),i(e,Me,l),i(e,N,l),r(N,M),r(M,he),h(G,he,null),r(N,ht),r(N,_e),r(_e,_t),i(e,Ue,l),i(e,P,l),h(H,P,null),r(P,vt),r(P,ve),r(ve,gt),i(e,Ve,l),i(e,k,l),h(F,k,null),r(k,$t),r(k,D),r(D,yt),r(D,ge),r(ge,Et),r(D,bt),r(D,U),r(U,wt),r(U,$e),r($e,Nt),r(U,Pt),r(D,kt),i(e,Be,l),i(e,S,l),h(R,S,null),r(S,Dt),r(S,T),r(T,St),r(T,ye),r(ye,Tt),r(T,xt),r(T,V),r(V,At),r(V,Ee),r(Ee,Lt),r(V,It),r(T,zt),i(e,Oe,l),i(e,x,l),r(x,B),r(B,be),h(J,be,null),r(x,Mt),r(x,we),r(we,Ut),i(e,qe,l),i(e,K,l),h(Q,K,null),i(e,je,l),i(e,W,l),h(X,W,null),i(e,Ce,l),i(e,Y,l),h(Z,Y,null),i(e,Ge,l),i(e,ee,l),h(te,ee,null),i(e,He,l),i(e,re,l),h(se,re,null),i(e,Fe,l),i(e,A,l),r(A,O),r(O,Ne),h(ae,Ne,null),r(A,Vt),r(A,Pe),r(Pe,Bt),i(e,Re,l),i(e,$,l),h(ne,$,null),r($,Ot),r($,ke),r(ke,qt),r($,jt),r($,De),r(De,Ct),r($,Gt),r($,Se),r(Se,Ht),i(e,Je,l),i(e,L,l),r(L,q),r(q,Te),h(le,Te,null),r(L,Ft),r(L,xe),r(xe,Rt),i(e,Ke,l),i(e,I,l),h(oe,I,null),r(I,Jt),r(I,Ae),r(Ae,Kt),Qe=!0},p:xr,i(e){Qe||(_(C.$$.fragment,e),_(G.$$.fragment,e),_(H.$$.fragment,e),_(F.$$.fragment,e),_(R.$$.fragment,e),_(J.$$.fragment,e),_(Q.$$.fragment,e),_(X.$$.fragment,e),_(Z.$$.fragment,e),_(te.$$.fragment,e),_(se.$$.fragment,e),_(ae.$$.fragment,e),_(ne.$$.fragment,e),_(le.$$.fragment,e),_(oe.$$.fragment,e),Qe=!0)},o(e){v(C.$$.fragment,e),v(G.$$.fragment,e),v(H.$$.fragment,e),v(F.$$.fragment,e),v(R.$$.fragment,e),v(J.$$.fragment,e),v(Q.$$.fragment,e),v(X.$$.fragment,e),v(Z.$$.fragment,e),v(te.$$.fragment,e),v(se.$$.fragment,e),v(ae.$$.fragment,e),v(ne.$$.fragment,e),v(le.$$.fragment,e),v(oe.$$.fragment,e),Qe=!1},d(e){t(b),e&&t(ie),e&&t(y),g(C),e&&t(Ie),e&&t(z),e&&t(ze),e&&t(fe),e&&t(Me),e&&t(N),g(G),e&&t(Ue),e&&t(P),g(H),e&&t(Ve),e&&t(k),g(F),e&&t(Be),e&&t(S),g(
R),e&&t(Oe),e&&t(x),g(J),e&&t(qe),e&&t(K),g(Q),e&&t(je),e&&t(W),g(X),e&&t(Ce),e&&t(Y),g(Z),e&&t(Ge),e&&t(ee),g(te),e&&t(He),e&&t(re),g(se),e&&t(Fe),e&&t(A),g(ae),e&&t(Re),e&&t($),g(ne),e&&t(Je),e&&t(L),g(le),e&&t(Ke),e&&t(I),g(oe)}}}const Lr={local:"general-utilities",sections:[{local:"transformers.file_utils.ExplicitEnum",title:"Enums and namedtuples"},{local:"transformers.add_start_docstrings",title:"Special Decorators"},{local:"transformers.file_utils.cached_property",title:"Special Properties"},{local:"transformers._LazyModule",title:"Other Utilities"}],title:"General Utilities"};function Ir(it,b,ie){let{fw:y}=b;return it.$$set=E=>{"fw"in E&&ie(0,y=E.fw)},[y]}class Vr extends kr{constructor(b){super();Dr(this,b,Ir,Ar,Sr,{fw:0})}}export{Vr as default,Lr as metadata};
423
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/data_collator.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;data-collator&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.default_data_collator&quot;,&quot;title&quot;:&quot;Default data collator&quot;},{&quot;local&quot;:&quot;transformers.DefaultDataCollator&quot;,&quot;title&quot;:&quot;DefaultDataCollator&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorWithPadding&quot;,&quot;title&quot;:&quot;DataCollatorWithPadding&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorForTokenClassification&quot;,&quot;title&quot;:&quot;DataCollatorForTokenClassification&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorForSeq2Seq&quot;,&quot;title&quot;:&quot;DataCollatorForSeq2Seq&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorForLanguageModeling&quot;,&quot;title&quot;:&quot;DataCollatorForLanguageModeling&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorForWholeWordMask&quot;,&quot;title&quot;:&quot;DataCollatorForWholeWordMask&quot;},{&quot;local&quot;:&quot;transformers.DataCollatorForPermutationLanguageModeling&quot;,&quot;title&quot;:&quot;DataCollatorForPermutationLanguageModeling&quot;}],&quot;title&quot;:&quot;Data Collator&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/data_collator.mdx-5221ee9b.js"> <link rel="modulepreload" 
href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <h1 class="relative group"><a id="data-collator" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#data-collator"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data Collator </span></h1> <p>Data collators are objects that will form a batch by using a list of dataset elements as input. These elements are of the same type as the elements of <code>train_dataset</code> or <code>eval_dataset</code>.</p> <p>To be able to build batches, data collators may apply some processing (like padding). 
Some of them (like <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling">DataCollatorForLanguageModeling</a>) also apply some random data augmentation (like random masking) on the formed batch.</p> <p>Examples of use can be found in the <a href="../examples">example scripts</a> or <a href="../notebooks">example notebooks</a>.</p> <h2 class="relative group"><a id="transformers.default_data_collator" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.default_data_collator"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Default data collator </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.default_data_collator"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.default_data_collator</span></h4><!-- HTML_TAG_END --> <a id="transformers.default_data_collator" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.default_data_collator"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L48" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">features<span class="opacity-60">: typing.List[InputDataClass]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60"> = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named:</p> <ul><li><code>label</code>: handles a single value (int or float) per object</li> <li><code>label_ids</code>: handles a list of values per object</li></ul> <p>Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. 
See glue and ner for example of how it’s useful.</p></div> <h2 class="relative group"><a id="transformers.DefaultDataCollator" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DefaultDataCollator"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DefaultDataCollator </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DefaultDataCollator"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" 
fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DefaultDataCollator</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DefaultDataCollator" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DefaultDataCollator"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L74" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span 
class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DefaultDataCollator.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DefaultDataCollator.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. 
Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named:</p> <ul><li><code>label</code>: handles a single value (int or float) per object</li> <li><code>label_ids</code>: handles a list of values per object</li></ul> <p>Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it’s useful.</p> <p>This is an object (like other data collators) rather than a pure function like default_data_collator. This can be helpful if you need to set a return_tensors value at initialization.</p></div> <h2 class="relative group"><a id="transformers.DataCollatorWithPadding" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorWithPadding"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DataCollatorWithPadding </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center 
text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorWithPadding"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataCollatorWithPadding</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataCollatorWithPadding" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorWithPadding"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L212" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul 
class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorWithPadding.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorWithPadding.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorWithPadding.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorWithPadding.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorWithPadding.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.DataCollatorWithPadding.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorWithPadding.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorWithPadding.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorWithPadding.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorWithPadding.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. 
Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data collator that will dynamically pad the inputs received.</p></div> <h2 class="relative group"><a id="transformers.DataCollatorForTokenClassification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DataCollatorForTokenClassification </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForTokenClassification"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataCollatorForTokenClassification</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForTokenClassification" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForTokenClassification"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L264" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_pad_token_id<span class="opacity-60">: int = -100</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.label_pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.label_pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForTokenClassification.return_tensors" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForTokenClassification.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. 
Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data collator that will dynamically pad the inputs received, as well as the labels.</p></div> <h2 class="relative group"><a id="transformers.DataCollatorForSeq2Seq" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DataCollatorForSeq2Seq </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForSeq2Seq"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataCollatorForSeq2Seq</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForSeq2Seq" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForSeq2Seq"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L514" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Optional[typing.Any] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_pad_token_id<span class="opacity-60">: int = -100</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.tokenizer" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) &#x2014; The model that is being trained. If set and has the <em>prepare_decoder_input_ids_from_labels</em>, use it to prepare the <em>decoder_input_ids</em></p> <p>This is useful when using <em>label_smoothing</em> to avoid calculating loss twice.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.label_pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.label_pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_pad_token_id</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForSeq2Seq.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForSeq2Seq.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data collator that will dynamically pad the inputs received, as well as the labels.</p></div> <h2 class="relative group"><a id="transformers.DataCollatorForLanguageModeling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForLanguageModeling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>DataCollatorForLanguageModeling </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForLanguageModeling"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataCollatorForLanguageModeling</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForLanguageModeling" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForLanguageModeling"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L607" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mlm<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mlm_probability<span class="opacity-60">: float = 0.15</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tf_experimental_compile<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">return_tensors<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForLanguageModeling.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForLanguageModeling.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>) &#x2014; The tokenizer used for encoding the data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.DataCollatorForLanguageModeling.mlm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForLanguageModeling.mlm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mlm</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use masked language modeling. If set to <code>False</code>, the labels are the same as the inputs with the padding tokens ignored (by setting them to -100). 
Otherwise, the labels are -100 for non-masked tokens and the value to predict for the masked token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForLanguageModeling.mlm_probability" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForLanguageModeling.mlm_probability"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mlm_probability</strong> (<code>float</code>, <em>optional</em>, defaults to 0.15) &#x2014; The probability with which to (randomly) mask tokens in the input, when <code>mlm</code> is set to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForLanguageModeling.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.DataCollatorForLanguageModeling.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DataCollatorForLanguageModeling.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForLanguageModeling.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code>) &#x2014; The type of Tensor to return. Allowable values are &#x201C;np&#x201D;, &#x201C;pt&#x201D; and &#x201C;tf&#x201D;.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the <code>&quot;special_tokens_mask&quot;</code> key, as returned by a <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> or a <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> with the argument <code>return_special_tokens_mask=True</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForLanguageModeling.numpy_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 
18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>numpy_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForLanguageModeling.numpy_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForLanguageModeling.numpy_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L805" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_mask<span class="opacity-60">: typing.Optional[typing.Any] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForLanguageModeling.tf_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 
13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tf_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForLanguageModeling.tf_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForLanguageModeling.tf_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L659" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_token_id<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_mask<span class="opacity-60">: typing.Optional[typing.Any] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForLanguageModeling.torch_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 
12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>torch_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForLanguageModeling.torch_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForLanguageModeling.torch_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L748" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> 
<span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_mask<span class="opacity-60">: typing.Optional[typing.Any] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.</p></div></div> <h2 class="relative group"><a id="transformers.DataCollatorForWholeWordMask" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForWholeWordMask"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DataCollatorForWholeWordMask </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForWholeWordMask"><!-- HTML_TAG_START --><h3 class="!m-0"><span 
class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataCollatorForWholeWordMask</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForWholeWordMask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForWholeWordMask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L846" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mlm<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mlm_probability<span class="opacity-60">: float = 0.15</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tf_experimental_compile<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Data collator used for language modeling that masks entire words.</p> <ul><li>collates batches of tensors, honoring their tokenizer’s pad_token</li> <li>preprocesses 
batches for masked language modeling</li></ul> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This collator relies on details of the implementation of subword tokenization by <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>, specifically that subword tokens are prefixed with <em>##</em>. For tokenizers that do not adhere to this scheme, this collator will produce an output that is roughly equivalent to <code>.DataCollatorForLanguageModeling</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForWholeWordMask.numpy_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 
7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>numpy_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForWholeWordMask.numpy_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForWholeWordMask.numpy_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L1072" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">mask_labels<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set ‘mask_labels’ means we use whole word mask (wwm), we directly mask idxs according to it’s ref.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForWholeWordMask.tf_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tf_mask_tokens</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.DataCollatorForWholeWordMask.tf_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForWholeWordMask.tf_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L1031" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_labels<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
Set ‘mask_labels’ means we use whole word mask (wwm), we directly mask idxs according to it’s ref.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForWholeWordMask.torch_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>torch_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForWholeWordMask.torch_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForWholeWordMask.torch_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L992" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask_labels<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
Set ‘mask_labels’ means we use whole word mask (wwm), we directly mask idxs according to it’s ref.</p></div></div> <h2 class="relative group"><a id="transformers.DataCollatorForPermutationLanguageModeling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataCollatorForPermutationLanguageModeling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DataCollatorForPermutationLanguageModeling </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForPermutationLanguageModeling"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" 
d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataCollatorForPermutationLanguageModeling</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForPermutationLanguageModeling" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForPermutationLanguageModeling"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L1197" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs 
md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">plm_probability<span class="opacity-60">: float = 0.16666666666666666</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_span_length<span class="opacity-60">: int = 5</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Data collator used for permutation language modeling.</p> <ul><li>collates batches of tensors, honoring their tokenizer’s pad_token</li> <li>preprocesses batches for permutation language modeling with procedures specific to XLNet</li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForPermutationLanguageModeling.numpy_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 
26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>numpy_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForPermutationLanguageModeling.numpy_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForPermutationLanguageModeling.numpy_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L1436" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>The masked tokens to be predicted for a particular sequence are determined by the following algorithm:</p> <ol start="0"><li>Start from the beginning of the sequence by setting <code>cur_len = 0</code> (number of tokens processed so far).</li> <li>Sample a <code>span_length</code> from the interval <code>[1, max_span_length]</code> (length of span of tokens to be masked)</li> <li>Reserve a context of length <code>context_length = span_length / plm_probability</code> to surround span to be masked</li> <li>Sample a starting point <code>start_index</code> from the interval <code>[cur_len, cur_len + context_length - span_length]</code> and mask tokens <code>start_index:start_index + span_length</code></li> <li>Set <code>cur_len = cur_len + context_length</code>. If <code>cur_len &lt; max_len</code> (i.e. 
there are tokens remaining in the sequence to be processed), repeat from Step 1.</li></ol></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForPermutationLanguageModeling.tf_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tf_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForPermutationLanguageModeling.tf_mask_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForPermutationLanguageModeling.tf_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L1328" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>The masked tokens to be predicted for a particular sequence are determined by the following algorithm:</p> <ol start="0"><li>Start from the beginning of the sequence by setting <code>cur_len = 0</code> (number of tokens processed so far).</li> <li>Sample a <code>span_length</code> from the interval <code>[1, max_span_length]</code> (length of span of tokens to be masked)</li> <li>Reserve a context of length <code>context_length = span_length / plm_probability</code> to surround span to be masked</li> <li>Sample a starting point <code>start_index</code> from the interval <code>[cur_len, cur_len + context_length 
- span_length]</code> and mask tokens <code>start_index:start_index + span_length</code></li> <li>Set <code>cur_len = cur_len + context_length</code>. If <code>cur_len &lt; max_len</code> (i.e. there are tokens remaining in the sequence to be processed), repeat from Step 1.</li></ol></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataCollatorForPermutationLanguageModeling.torch_mask_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>torch_mask_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataCollatorForPermutationLanguageModeling.torch_mask_tokens" 
class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataCollatorForPermutationLanguageModeling.torch_mask_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/data_collator.py#L1231" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Any</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>The masked tokens to be predicted for a particular sequence are determined by the following algorithm:</p> <ol start="0"><li>Start from the beginning of the sequence by setting <code>cur_len = 0</code> (number of tokens processed so far).</li> <li>Sample a <code>span_length</code> from the interval <code>[1, max_span_length]</code> (length of span of tokens to be masked)</li> <li>Reserve a context of length 
<code>context_length = span_length / plm_probability</code> to surround span to be masked</li> <li>Sample a starting point <code>start_index</code> from the interval <code>[cur_len, cur_len + context_length - span_length]</code> and mask tokens <code>start_index:start_index + span_length</code></li> <li>Set <code>cur_len = cur_len + context_length</code>. If <code>cur_len &lt; max_len</code> (i.e. there are tokens remaining in the sequence to be processed), repeat from Step 1.</li></ol></div></div> <script type="module" data-hydrate="14bvo5"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="14bvo5"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/data_collator.mdx-5221ee9b.js") ], params: {} } }); </script>
424
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/callback.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;callbacks&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.integrations.CometCallback&quot;,&quot;title&quot;:&quot;Available Callbacks&quot;},{&quot;local&quot;:&quot;transformers.TrainerCallback&quot;,&quot;title&quot;:&quot;TrainerCallback&quot;},{&quot;local&quot;:&quot;transformers.TrainerState&quot;,&quot;title&quot;:&quot;TrainerState&quot;},{&quot;local&quot;:&quot;transformers.TrainerControl&quot;,&quot;title&quot;:&quot;TrainerControl&quot;}],&quot;title&quot;:&quot;Callbacks&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/callback.mdx-9aed4ad7.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="callbacks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#callbacks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Callbacks </span></h1> <p>Callbacks are objects that can customize the behavior of the training loop in the PyTorch <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> (this feature is not yet implemented in TensorFlow) that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms…) and take decisions (like early stopping).</p> <p>Callbacks are “read only” pieces of code, apart from the <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerControl">TrainerControl</a> object they return, they cannot change anything in the training loop. 
For customizations that require changes in the training loop, you should subclass <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> and override the methods you need (see <a href="trainer">trainer</a> for examples).</p> <p>By default a <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will use the following callbacks:</p> <ul><li><a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.DefaultFlowCallback">DefaultFlowCallback</a> which handles the default behavior for logging, saving and evaluation.</li> <li><a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.PrinterCallback">PrinterCallback</a> or <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.ProgressCallback">ProgressCallback</a> to display progress and print the logs (the first one is used if you deactivate tqdm through the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>, otherwise it’s the second one).</li> <li><a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.integrations.TensorBoardCallback">TensorBoardCallback</a> if tensorboard is accessible (either through PyTorch &gt;= 1.4 or tensorboardX).</li> <li><a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.integrations.WandbCallback">WandbCallback</a> if <a href="https://www.wandb.com/" rel="nofollow">wandb</a> is installed.</li> <li><a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.integrations.CometCallback">CometCallback</a> if <a href="https://www.comet.ml/site/" rel="nofollow">comet_ml</a> is installed.</li> <li><a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.integrations.MLflowCallback">MLflowCallback</a> if <a href="https://www.mlflow.org/" rel="nofollow">mlflow</a> is installed.</li> <li><a 
href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.integrations.AzureMLCallback">AzureMLCallback</a> if <a href="https://pypi.org/project/azureml-sdk/" rel="nofollow">azureml-sdk</a> is installed.</li> <li><a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.integrations.CodeCarbonCallback">CodeCarbonCallback</a> if <a href="https://pypi.org/project/codecarbon/" rel="nofollow">codecarbon</a> is installed.</li></ul> <p>The main class that implements callbacks is <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a>. It gets the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> used to instantiate the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, can access that Trainer’s internal state via <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerState">TrainerState</a>, and can take some actions on the training loop via <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerControl">TrainerControl</a>.</p> <h2 class="relative group"><a id="transformers.integrations.CometCallback" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.CometCallback"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Available Callbacks </span></h2> <p>Here is the list of the available <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> in the library:</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.CometCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">CometCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.CometCallback" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.integrations.CometCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L660" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that sends the logs to <a href="https://www.comet.ml/site/" rel="nofollow">Comet ML</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.CometCallback.setup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>setup</span></h4><!-- HTML_TAG_END --> <a id="transformers.integrations.CometCallback.setup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.CometCallback.setup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L671" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Setup the optional Comet.ml integration.</p> <p>Environment: COMET_MODE (<code>str</code>, <em>optional</em>): Whether to create an online, offline experiment or disable Comet logging. Can be “OFFLINE”, “ONLINE”, or “DISABLED”. Defaults to “ONLINE”. COMET_PROJECT_NAME (<code>str</code>, <em>optional</em>): Comet project name for experiments COMET_OFFLINE_DIRECTORY (<code>str</code>, <em>optional</em>): Folder to use for saving offline experiments when <code>COMET_MODE</code> is “OFFLINE” COMET_LOG_ASSETS (<code>str</code>, <em>optional</em>): Whether or not to log training assets (tf event logs, checkpoints, etc), to Comet. Can be “TRUE”, or “FALSE”. 
Defaults to “TRUE”.</p> <p>For a number of configurable items in the environment, see <a href="https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables" rel="nofollow">here</a>.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DefaultFlowCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DefaultFlowCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DefaultFlowCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DefaultFlowCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L406" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that handles the default flow of the training loop for logs, evaluation and checkpoints.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PrinterCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 
3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PrinterCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PrinterCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PrinterCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L494" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A bare <a 
href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that just prints the logs.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProgressCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ProgressCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ProgressCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProgressCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L452" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that displays the progress of training or evaluation.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.EarlyStoppingCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 
1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">EarlyStoppingCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.EarlyStoppingCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.EarlyStoppingCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L505" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">early_stopping_patience<span class="opacity-60">: int = 1</span></span> </span><span 
class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">early_stopping_threshold<span class="opacity-60">: typing.Optional[float] = 0.0</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.EarlyStoppingCallback.early_stopping_patience" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.EarlyStoppingCallback.early_stopping_patience"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>early_stopping_patience</strong> (<code>int</code>) &#x2014; Use with <code>metric_for_best_model</code> to stop training when the specified metric worsens for <code>early_stopping_patience</code> evaluation calls.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex 
space-x-1.5 items-start"><a id="transformers.EarlyStoppingCallback.early_stopping_threshold(float," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.EarlyStoppingCallback.early_stopping_threshold(float,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>early_stopping_threshold(<code>float</code>,</strong> <em>optional</em>) &#x2014; Use with TrainingArguments <code>metric_for_best_model</code> and <code>early_stopping_patience</code> to denote how much the specified metric must improve to satisfy early stopping conditions. 
`<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that handles early stopping.</p> <p>This callback depends on <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> argument <em>load_best_model_at_end</em> functionality to set best_metric in <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerState">TrainerState</a>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.TensorBoardCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">TensorBoardCallback</span></span></h3><!-- HTML_TAG_END --> <a 
id="transformers.integrations.TensorBoardCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.TensorBoardCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L446" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tb_writer<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.integrations.TensorBoardCallback.tb_writer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.integrations.TensorBoardCallback.tb_writer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tb_writer</strong> (<code>SummaryWriter</code>, <em>optional</em>) &#x2014; The writer to use. 
Will instantiate one if not set.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that sends the logs to <a href="https://www.tensorflow.org/tensorboard" rel="nofollow">TensorBoard</a>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.WandbCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">WandbCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.WandbCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.WandbCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L534" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that sends the logs to <a href="https://www.wandb.com/" rel="nofollow">Weight and Biases</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.WandbCallback.setup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 
6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>setup</span></h4><!-- HTML_TAG_END --> <a id="transformers.integrations.WandbCallback.setup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.WandbCallback.setup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L551" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Setup the optional Weights &amp; Biases (<em>wandb</em>) integration.</p> <p>One can subclass and override this method to customize the setup if needed. Find more information <a href="https://docs.wandb.ai/integrations/huggingface" rel="nofollow">here</a>. You can also override the following environment variables:</p> <p>Environment: WANDB_LOG_MODEL (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether or not to log model as artifact at the end of training. Use along with <em>TrainingArguments.load_best_model_at_end</em> to upload best model. WANDB_WATCH (<code>str</code>, <em>optional</em> defaults to <code>&quot;gradients&quot;</code>): Can be <code>&quot;gradients&quot;</code>, <code>&quot;all&quot;</code> or <code>&quot;false&quot;</code>. 
Set to <code>&quot;false&quot;</code> to disable gradient logging or <code>&quot;all&quot;</code> to log gradients and parameters. WANDB_PROJECT (<code>str</code>, <em>optional</em>, defaults to <code>&quot;huggingface&quot;</code>): Set this to a custom string to store results in a different project. WANDB_DISABLED (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether or not to disable wandb entirely. Set <em>WANDB_DISABLED=true</em> to disable.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.MLflowCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">MLflowCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.MLflowCallback" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.integrations.MLflowCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L759" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that sends the logs to <a href="https://www.mlflow.org/" rel="nofollow">MLflow</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.MLflowCallback.setup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 
inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>setup</span></h4><!-- HTML_TAG_END --> <a id="transformers.integrations.MLflowCallback.setup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.MLflowCallback.setup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L776" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Setup the optional MLflow integration.</p> <p>Environment: HF_MLFLOW_LOG_ARTIFACTS (<code>str</code>, <em>optional</em>): Whether to use MLflow .log_artifact() facility to log artifacts.</p> <p>This only makes sense if logging to a remote server, e.g. s3 or GCS. If set to <code>True</code> or <em>1</em>, will copy whatever is in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>’s <code>output_dir</code> to the local or remote artifact storage. 
Using it without a remote storage will just copy the files to your artifact location.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.AzureMLCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">AzureMLCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.AzureMLCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.AzureMLCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 
1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L736" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">azureml_run<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that sends the logs to <a href="https://pypi.org/project/azureml-sdk/" rel="nofollow">AzureML</a>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.integrations.CodeCarbonCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 
0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.integrations.</span><span class="font-semibold">CodeCarbonCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.integrations.CodeCarbonCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.integrations.CodeCarbonCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/integrations.py#L919" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs 
md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> that tracks the CO2 emission of training.</p></div> <h2 class="relative group"><a id="transformers.TrainerCallback" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TrainerCallback </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TrainerCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L159" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono 
text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>) &#x2014; The training arguments used to instantiate the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>state</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerState">TrainerState</a>) &#x2014; The current state of the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.control" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.control"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>control</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerControl">TrainerControl</a>) &#x2014; The object that is returned to the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> and can be used to make some decisions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a 
href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <code>torch.nn.Module</code>) &#x2014; The model being trained.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer used for encoding the data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.optimizer"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>torch.optim.Optimizer</code>) &#x2014; The optimizer used for the training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.lr_scheduler" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.lr_scheduler"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 
56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lr_scheduler</strong> (<code>torch.optim.lr_scheduler.LambdaLR</code>) &#x2014; The scheduler used for setting the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.train_dataloader" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.train_dataloader"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>train_dataloader</strong> (<code>torch.utils.data.DataLoader</code>, <em>optional</em>) &#x2014; The current dataloader used for training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.eval_dataloader" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TrainerCallback.eval_dataloader"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_dataloader</strong> (<code>torch.utils.data.DataLoader</code>, <em>optional</em>) &#x2014; The current dataloader used for training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics computed by the last evaluation phase.</p> <p>Those are only accessible in the event <code>on_evaluate</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerCallback.logs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerCallback.logs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logs</strong> (<code>Dict[str, float]</code>) &#x2014; The values to log.</p> <p>Those are only accessible in the event <code>on_log</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A class for objects that will inspect the state of the training loop at some events and take some decisions. 
At each of those events the following arguments are available:</p> <p>The <code>control</code> object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version.</p> <p>The argument <code>args</code>, <code>state</code> and <code>control</code> are positionals for all events, all the others are grouped in <code>kwargs</code>. You can unpack the ones you need in the signature of the event using them. As an example, see the code of the simple <code>PrinterCallback</code>.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">class</span> <span class="hljs-title class_">PrinterCallback</span>(<span class="hljs-title class_ inherited__">TrainerCallback</span>): <span class="hljs-keyword">def</span> <span class="hljs-title 
function_">on_log</span>(<span class="hljs-params">self, args, state, control, logs=<span class="hljs-literal">None</span>, **kwargs</span>): _ = logs.pop(<span class="hljs-string">&quot;total_flos&quot;</span>, <span class="hljs-literal">None</span>) <span class="hljs-keyword">if</span> state.is_local_process_zero: <span class="hljs-built_in">print</span>(logs)<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_epoch_begin"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_epoch_begin</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.TrainerCallback.on_epoch_begin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_epoch_begin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L227" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the beginning of an epoch.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_epoch_end"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_epoch_end</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_epoch_end" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_epoch_end"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L233" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> 
<p>Event called at the end of an epoch.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_evaluate"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_evaluate</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_evaluate" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_evaluate"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L259" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called after an evaluation phase.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 
-ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_init_end"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_init_end</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_init_end" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_init_end"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L209" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the end of the initialization of the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_log"><!-- 
HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_log</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_log" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_log"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L271" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called after logging the last logs.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_prediction_step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 
dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_prediction_step</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_prediction_step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_prediction_step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L277" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called after a prediction step.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_save"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_save</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_save" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_save"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L265" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called after a checkpoint save.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_step_begin"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 
16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_step_begin</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_step_begin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_step_begin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L239" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the beginning of a training step. 
If using gradient accumulation, one training step might take several inputs.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_step_end"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_step_end</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_step_end" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_step_end"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L252" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the end of a training step. 
If using gradient accumulation, one training step might take several inputs.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_substep_end"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_substep_end</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_substep_end" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_substep_end"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L246" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the end of an substep during gradient accumulation.</p></div> <div class="docstring"><div><span class="group flex 
space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_train_begin"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_train_begin</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_train_begin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_train_begin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L215" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the beginning of training.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerCallback.on_train_end"><!-- 
HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>on_train_end</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerCallback.on_train_end" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerCallback.on_train_end"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 
0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L221" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state<span class="opacity-60">: TrainerState</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">control<span class="opacity-60">: TrainerControl</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Event called at the end of training.</p></div></div> <p>Here is an example of how to register a custom callback with the PyTorch <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition 
duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">class</span> <span class="hljs-title class_">MyCallback</span>(<span class="hljs-title class_ inherited__">TrainerCallback</span>): <span class="hljs-string">&quot;A callback that prints a message at the beginning of training&quot;</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">on_train_begin</span>(<span class="hljs-params">self, args, state, control, **kwargs</span>): <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;Starting training&quot;</span>) trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=[MyCallback], <span class="hljs-comment"># We can either pass the callback class this way or an instance of it (MyCallback())</span> )<!-- HTML_TAG_END --></pre></div> <p>Another way to register a callback is to call <code>trainer.add_callback()</code> as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button 
class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->trainer = Trainer(...) 
trainer.add_callback(MyCallback) <span class="hljs-comment"># Alternatively, we can pass an instance of the callback class</span> trainer.add_callback(MyCallback())<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="transformers.TrainerState" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TrainerState </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerState"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 
1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TrainerState</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TrainerState" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerState"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L35" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">epoch<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">global_step<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_steps<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_train_epochs<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">total_flos<span class="opacity-60">: float = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_history<span class="opacity-60">: typing.List[typing.Dict[str, float]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">best_metric<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">best_model_checkpoint<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_local_process_zero<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_world_process_zero<span 
class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_hyper_param_search<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trial_name<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trial_params<span class="opacity-60">: typing.Dict[str, typing.Union[str, float, int, bool]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>epoch</strong> (<code>float</code>, <em>optional</em>) &#x2014; Only set during training, will represent the epoch the training is at (the decimal part being the percentage of the current epoch completed).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.global_step" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.global_step"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>global_step</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; During training, represents the number of update steps completed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.max_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.max_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The number of update steps to do during the current training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.total_flos" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.total_flos"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>total_flos</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.log_history" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.log_history"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_history</strong> (<code>List[Dict[str, float]]</code>, <em>optional</em>) &#x2014; The list of logs done since the beginning of training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.best_metric" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.best_metric"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>best_metric</strong> (<code>float</code>, <em>optional</em>) &#x2014; When tracking the best model, the value of the best metric encountered so far.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.best_model_checkpoint" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.best_model_checkpoint"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>best_model_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; When tracking the best model, the value of the name of the checkpoint for the best model encountered so far.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.is_local_process_zero" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.is_local_process_zero"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_local_process_zero</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not this process is the local (e.g., on one machine if training in a distributed 
fashion on several machines) main process.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.is_world_process_zero" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.is_world_process_zero"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_world_process_zero</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be <code>True</code> for one process).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerState.is_hyper_param_search" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerState.is_hyper_param_search"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_hyper_param_search</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search. This will impact the way data will be logged in TensorBoard.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A class containing the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> inner state that will be saved along the model and optimizer when checkpointing and passed to the <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>In all this class, one step is to be understood as one update step. 
When using gradient accumulation, one update step may require several forward and backward passes: if you use <code>gradient_accumulation_steps=n</code>, then one update step requires going through <em>n</em> batches.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerState.load_from_json"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>load_from_json</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerState.load_from_json" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerState.load_from_json"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L101" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">json_path<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Create an instance from the content of <code>json_path</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerState.save_to_json"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_to_json</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainerState.save_to_json" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerState.save_to_json"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L95" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">json_path<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Save the content of this instance in JSON format inside <code>json_path</code>.</p></div></div> <h2 class="relative group"><a id="transformers.TrainerControl" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerControl"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TrainerControl </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg 
-mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainerControl"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TrainerControl</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TrainerControl" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainerControl"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L110" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">should_training_stop<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">should_epoch_stop<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">should_save<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">should_evaluate<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">should_log<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerControl.should_training_stop" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerControl.should_training_stop"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>should_training_stop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the training should be interrupted.</p> <p>If <code>True</code>, this variable will not be set back to <code>False</code>. 
The training will just stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerControl.should_epoch_stop" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerControl.should_epoch_stop"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>should_epoch_stop</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the current epoch should be interrupted.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next epoch.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerControl.should_save" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerControl.should_save"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>should_save</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be saved at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerControl.should_evaluate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerControl.should_evaluate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>should_evaluate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should be evaluated at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainerControl.should_log" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainerControl.should_log"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>should_log</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the logs should be reported at this step.</p> <p>If <code>True</code>, this variable will be set back to <code>False</code> at the beginning of the next step.<!-- 
HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A class that handles the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> control flow. This class is used by the <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a> to activate some switches in the training loop.</p></div> <script type="module" data-hydrate="1sjof82"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1sjof82"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/callback.mdx-9aed4ad7.js") ], params: {} } }); </script>
425
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/feature_extractor.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;feature-extractor&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.FeatureExtractionMixin&quot;,&quot;title&quot;:&quot;FeatureExtractionMixin&quot;},{&quot;local&quot;:&quot;transformers.SequenceFeatureExtractor&quot;,&quot;title&quot;:&quot;SequenceFeatureExtractor&quot;},{&quot;local&quot;:&quot;transformers.BatchFeature&quot;,&quot;title&quot;:&quot;BatchFeature&quot;},{&quot;local&quot;:&quot;transformers.ImageFeatureExtractionMixin&quot;,&quot;title&quot;:&quot;ImageFeatureExtractionMixin&quot;}],&quot;title&quot;:&quot;Feature Extractor&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/feature_extractor.mdx-045f07b4.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="feature-extractor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#feature-extractor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Feature Extractor </span></h1> <p>A feature extractor is in charge of preparing input features for a multi-modal model. This includes feature extraction from sequences, <em>e.g.</em>, pre-processing audio files to Log-Mel Spectrogram features, feature extraction from images <em>e.g.</em> cropping image image files, but also padding, normalization, and conversion to Numpy, PyTorch, and TensorFlow tensors.</p> <h2 class="relative group"><a id="transformers.FeatureExtractionMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FeatureExtractionMixin </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FeatureExtractionMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FeatureExtractionMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FeatureExtractionMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FeatureExtractionMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/feature_extraction_utils.py#L205" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature extractors.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FeatureExtractionMixin.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 
inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.FeatureExtractionMixin.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FeatureExtractionMixin.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 
0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/feature_extraction_utils.py#L229" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.cache_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.cache_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cache_dir</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model feature extractor should be cached if the standard cache should not be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.force_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.force_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force to (re-)download the feature extractor files and override the cached versions if they exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.resume_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.resume_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received file. 
Attempts to resume the download if such a file exists.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.proxies" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.proxies"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}.</code> The proxies are used on each request.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.FeatureExtractionMixin.from_pretrained.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. 
If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.revision(str," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.revision(str,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>revision(<code>str</code>,</strong> <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.return_unused_kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.return_unused_kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_unused_kwargs</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>False</code>, then this function returns just the final feature extractor object. 
If <code>True</code>, then this functions returns a <code>Tuple(feature_extractor, unused_kwargs)</code> where <em>unused_kwargs</em> is a dictionary consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of <code>kwargs</code> which has not been used to update <code>feature_extractor</code> and is otherwise ignored.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.from_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.from_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; The values in kwargs of any keys which are feature extractor attributes will be used to override the loaded values. 
Behavior concerning key/value pairs whose keys are <em>not</em> feature extractor attributes is controlled by the <code>return_unused_kwargs</code> keyword parameter.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate a type of <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin">FeatureExtractionMixin</a> from a feature extractor, <em>e.g.</em> a derived class of <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor">SequenceFeatureExtractor</a>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Passing <code>use_auth_token=True</code> is required when you want to use a private model.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 
border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># We can&#x27;t instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor* so let&#x27;s show the examples on a</span> <span class="hljs-comment"># derived class: *Wav2Vec2FeatureExtractor*</span> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( <span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span> ) <span class="hljs-comment"># Download feature_extraction_config from huggingface.co and cache.</span> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( <span class="hljs-string">&quot;./test/saved_model/&quot;</span> ) <span class="hljs-comment"># E.g. feature_extractor (or model) was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*</span> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/preprocessor_config.json&quot;</span>) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( <span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>, return_attention_mask=<span class="hljs-literal">False</span>, foo=<span class="hljs-literal">False</span> ) <span class="hljs-keyword">assert</span> feature_extractor.return_attention_mask <span class="hljs-keyword">is</span> <span class="hljs-literal">False</span> feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained( <span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>, return_attention_mask=<span class="hljs-literal">False</span>, foo=<span class="hljs-literal">False</span>, return_unused_kwargs=<span class="hljs-literal">True</span> ) <span class="hljs-keyword">assert</span> feature_extractor.return_attention_mask <span class="hljs-keyword">is</span> <span class="hljs-literal">False</span> <span class="hljs-keyword">assert</span> unused_kwargs 
== {<span class="hljs-string">&quot;foo&quot;</span>: <span class="hljs-literal">False</span>}<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FeatureExtractionMixin.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.FeatureExtractionMixin.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FeatureExtractionMixin.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/feature_extraction_utils.py#L313" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base 
!pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file will be saved (will be created if it does not exist).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionMixin.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionMixin.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your feature extractor to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. 
Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save a feature_extractor object to the directory <code>save_directory</code>, so that it can be re-loaded using the <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> class method.</p></div></div> <h2 class="relative group"><a id="transformers.SequenceFeatureExtractor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SequenceFeatureExtractor </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SequenceFeatureExtractor"><!-- HTML_TAG_START --><h3 class="!m-0"><span 
class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SequenceFeatureExtractor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SequenceFeatureExtractor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SequenceFeatureExtractor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/feature_extraction_sequence_utils.py#L38" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature_size<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sampling_rate<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding_value<span class="opacity-60">: float</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.feature_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.feature_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feature_size</strong> (<code>int</code>) &#x2014; The feature dimension of the extracted features.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.sampling_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.sampling_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sampling_rate</strong> (<code>int</code>) &#x2014; The sampling rate at which the audio files should be digitalized expressed in Hertz per second (Hz).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.padding_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.padding_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding_value</strong> (<code>float</code>) &#x2014; The value that is used to fill the padding values / vectors.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This is a general feature extraction class for speech recognition.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SequenceFeatureExtractor.pad"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all 
bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>pad</span></h4><!-- HTML_TAG_END --> <a id="transformers.SequenceFeatureExtractor.pad" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SequenceFeatureExtractor.pad"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/feature_extraction_sequence_utils.py#L61" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">processed_features<span class="opacity-60">: typing.Union[transformers.feature_extraction_utils.BatchFeature, typing.List[transformers.feature_extraction_utils.BatchFeature], typing.Dict[str, transformers.feature_extraction_utils.BatchFeature], typing.Dict[str, typing.List[transformers.feature_extraction_utils.BatchFeature]], typing.List[typing.Dict[str, transformers.feature_extraction_utils.BatchFeature]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.processed_features" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.processed_features"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>processed_features</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a>, list of <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a>, <code>Dict[str, List[float]]</code>, <code>Dict[str, List[List[float]]</code> or <code>List[Dict[str, List[float]]]</code>) &#x2014; Processed inputs. Can represent one input (<a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a> or <code>Dict[str, List[float]]</code>) or a batch of input values / vectors (list of <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature">BatchFeature</a>, <em>Dict[str, List[List[float]]]</em> or <em>List[Dict[str, List[float]]]</em>) so you can use this method during preprocessing as well as in a PyTorch Dataloader collate function.</p> <p>Instead of <code>List[float]</code> you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 
0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>) &#x2014; Activates truncation to 
cut input sequences longer than <code>max_length</code> to <code>max_length</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability</p> <blockquote> <p>= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.</p> </blockquote><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific feature_extractor&#x2019;s default.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SequenceFeatureExtractor.pad.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SequenceFeatureExtractor.pad.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the max sequence length in the batch.</p> <p>Padding side (left/right) padding values are defined at the feature extractor level (with <code>self.padding_side</code>, <code>self.padding_value</code>)</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If the <code>processed_features</code> passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with <code>return_tensors</code>. 
In the case of PyTorch tensors, you will lose the specific device of your tensors however.</p></div></div></div> <h2 class="relative group"><a id="transformers.BatchFeature" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchFeature"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BatchFeature </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchFeature"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" 
opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BatchFeature</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BatchFeature" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchFeature"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/feature_extraction_utils.py#L63" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span 
class="opacity-60">: typing.Union[typing.Dict[str, typing.Any], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tensor_type<span class="opacity-60">: typing.Union[NoneType, str, transformers.file_utils.TensorType] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchFeature.data" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchFeature.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>dict</code>) &#x2014; Dictionary of lists/arrays/tensors returned by the <strong>call</strong>/pad methods (&#x2018;input_values&#x2019;, &#x2018;attention_mask&#x2019;, etc.).<!-- HTML_TAG_END --> </span></span> 
</li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchFeature.tensor_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchFeature.tensor_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tensor_type</strong> (<code>Union[None, str, TensorType]</code>, <em>optional</em>) &#x2014; You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Holds the output of the <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.SequenceFeatureExtractor.pad">pad()</a> and feature extractor specific <code>__call__</code> methods.</p> <p>This class is derived from a python dictionary and can be used as a dictionary.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchFeature.convert_to_tensors"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 
rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_to_tensors</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchFeature.convert_to_tensors" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchFeature.convert_to_tensors"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/feature_extraction_utils.py#L117" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tensor_type<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchFeature.convert_to_tensors.tensor_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchFeature.convert_to_tensors.tensor_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tensor_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; The type of tensors to use. If <code>str</code>, should be one of the values of the enum <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>. If <code>None</code>, no modification is done.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Convert the inner content to tensors.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchFeature.to"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 
11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchFeature.to" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchFeature.to"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/feature_extraction_utils.py#L182" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: typing.Union[str, ForwardRef(&#39;torch.device&#39;)]</span></span> 
</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchFeature.to.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchFeature.to.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>str</code> or <code>torch.device</code>) &#x2014; The device to put the tensors on.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchFeature.to.returns"><p 
class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.BatchFeature" >BatchFeature</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The same instance after modification.</p> <!-- HTML_TAG_END --></p></div></div> <p>Send all values to device by calling <code>v.to(device)</code> (PyTorch only).</p></div></div> <h2 class="relative group"><a id="transformers.ImageFeatureExtractionMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageFeatureExtractionMixin </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageFeatureExtractionMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 
dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ImageFeatureExtractionMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ImageFeatureExtractionMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageFeatureExtractionMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/image_utils.py#L76" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Mixin that contain utilities for preparing image features.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageFeatureExtractionMixin.center_crop"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>center_crop</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageFeatureExtractionMixin.center_crop" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageFeatureExtractionMixin.center_crop"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/image_utils.py#L249" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">size<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.center_crop.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.center_crop.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to resize.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.center_crop.size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.center_crop.size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>size</strong> (<code>int</code> or <code>Tuple[int, int]</code>) &#x2014; The size to which crop the image.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Crops <code>image</code> to the given size using a center crop. Note that if the image is too small to be cropped to the size given, it will be padded (so the returned result has the size asked).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageFeatureExtractionMixin.normalize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" 
d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>normalize</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageFeatureExtractionMixin.normalize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageFeatureExtractionMixin.normalize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/image_utils.py#L151" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mean<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">std<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.normalize.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.normalize.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to normalize.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.normalize.mean" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.normalize.mean"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mean</strong> (<code>List[float]</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The mean (per channel) to use for normalization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.normalize.std" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.normalize.std"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>std</strong> (<code>List[float]</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The standard deviation (per channel) to use for normalization.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Normalizes <code>image</code> with <code>mean</code> and <code>std</code>. 
Note that this will trigger a conversion of <code>image</code> to a NumPy array if it’s a PIL Image.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageFeatureExtractionMixin.resize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>resize</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageFeatureExtractionMixin.resize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageFeatureExtractionMixin.resize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/image_utils.py#L187" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">size<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resample<span class="opacity-60"> = 2</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">default_to_square<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_size<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div 
class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.resize.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.resize.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to resize.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.resize.size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.resize.size"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>size</strong> (<code>int</code> or <code>Tuple[int, int]</code>) &#x2014; The size to use for resizing the image. If <code>size</code> is a sequence like (h, w), output size will be matched to this.</p> <p>If <code>size</code> is an int and <code>default_to_square</code> is <code>True</code>, then image will be resized to (size, size). If <code>size</code> is an int and <code>default_to_square</code> is <code>False</code>, then smaller edge of the image will be matched to this number. 
i.e, if height &gt; width, then image will be rescaled to (size * height / width, size).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.resize.resample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.resize.resample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resample</strong> (<code>int</code>, <em>optional</em>, defaults to <code>PIL.Image.BILINEAR</code>) &#x2014; The filter to user for resampling.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.resize.default_to_square" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.resize.default_to_square"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>default_to_square</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; How to convert <code>size</code> when it is a single int. If set to <code>True</code>, the <code>size</code> will be converted to a square (<code>size</code>,<code>size</code>). 
If set to <code>False</code>, will replicate <a href="https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize" rel="nofollow"><code>torchvision.transforms.Resize</code></a> with support for resizing only the smallest edge and providing an optional <code>max_size</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.resize.max_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.resize.max_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_size</strong> (<code>int</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014; The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater than <code>max_size</code> after being resized according to <code>size</code>, then the image is resized again so that the longer edge is equal to <code>max_size</code>. 
As a result, <code>size</code> might be overruled, i.e the smaller edge may be shorter than <code>size</code>. Only used if <code>default_to_square</code> is <code>False</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Resizes <code>image</code>. Note that this will trigger a conversion of <code>image</code> to a PIL Image.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageFeatureExtractionMixin.to_numpy_array"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_numpy_array</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.ImageFeatureExtractionMixin.to_numpy_array" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageFeatureExtractionMixin.to_numpy_array"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/image_utils.py#L118" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">rescale<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">channel_first<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex 
items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.to_numpy_array.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.to_numpy_array.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>PIL.Image.Image</code> or <code>np.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to convert to a NumPy array.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.to_numpy_array.rescale" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.to_numpy_array.rescale"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>rescale</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will default to <code>True</code> if the image is a PIL Image or an array/tensor of integers, <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.to_numpy_array.channel_first" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.to_numpy_array.channel_first"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>channel_first</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to permute the dimensions of the image to put the channel dimension first.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Converts <code>image</code> to a numpy array. Optionally rescales it and puts the channel dimension as the first dimension.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageFeatureExtractionMixin.to_pil_image"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 
20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_pil_image</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageFeatureExtractionMixin.to_pil_image" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageFeatureExtractionMixin.to_pil_image"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/image_utils.py#L88" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">rescale<span class="opacity-60"> = 
None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.to_pil_image.image" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.to_pil_image.image"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image</strong> (<code>PIL.Image.Image</code> or <code>numpy.ndarray</code> or <code>torch.Tensor</code>) &#x2014; The image to convert to the PIL Image format.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageFeatureExtractionMixin.to_pil_image.rescale" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.ImageFeatureExtractionMixin.to_pil_image.rescale"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>rescale</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default to <code>True</code> if the image type is a floating type, <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Converts <code>image</code> to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if needed.</p></div></div> <script type="module" data-hydrate="rsm1u7"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="rsm1u7"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/feature_extractor.mdx-045f07b4.js") ], params: {} } }); </script>
426
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/text_generation.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;generation&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.generation_utils.GenerationMixin&quot;,&quot;title&quot;:&quot;GenerationMixn&quot;},{&quot;local&quot;:&quot;transformers.generation_tf_utils.TFGenerationMixin&quot;,&quot;title&quot;:&quot;TFGenerationMixn&quot;},{&quot;local&quot;:&quot;transformers.generation_flax_utils.FlaxGenerationMixin&quot;,&quot;title&quot;:&quot;FlaxGenerationMixn&quot;}],&quot;title&quot;:&quot;Generation&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/text_generation.mdx-5e23a84f.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="generation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#generation"><span><svg 
class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Generation </span></h1> <p>The methods for auto-regressive text generation, namely <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a> (for the PyTorch models), <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin.generate">generate()</a> (for the TensorFlow models) and <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin.generate">generate()</a> (for the Flax/JAX models), are implemented in <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin">GenerationMixin</a>, <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin">TFGenerationMixin</a> and <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin">FlaxGenerationMixin</a> respectively.</p> <p>The <code>GenerationMixin</code> classes are inherited by the corresponding base model classes, <em>e.g.</em> <a 
href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>, <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>, and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a> respectively, therefore exposing all methods for auto-regressive text generation to every model class.</p> <h2 class="relative group"><a id="transformers.generation_utils.GenerationMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>GenerationMixn </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 
text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">GenerationMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L379" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A class containing all functions for auto-regressive text generation, to be used as a mixin in <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>.</p> <p>The class exposes <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a>, which can be used for:</p> <ul><li><em>greedy decoding</em> by calling <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search">greedy_search()</a> if <code>num_beams=1</code> and <code>do_sample=False</code>.</li> <li><em>multinomial sampling</em> by calling <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample">sample()</a> if <code>num_beams=1</code> and <code>do_sample=True</code>.</li> <li><em>beam-search decoding</em> by calling <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search">beam_search()</a> if <code>num_beams&gt;1</code> and <code>do_sample=False</code>.</li> <li><em>beam-search multinomial sampling</em> by calling <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample">beam_sample()</a> if <code>num_beams&gt;1</code> and <code>do_sample=True</code>.</li> <li><em>diverse beam-search decoding</em> by calling <a 
href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search">group_beam_search()</a>, if <code>num_beams&gt;1</code> and <code>num_beam_groups&gt;1</code>.</li> <li><em>constrained beam-search decoding</em> by calling <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search">constrained_beam_search()</a>, if <code>constraints!=None</code> or <code>force_words_ids!=None</code>.</li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.generate"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>generate</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.generate" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.generate"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L832" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Optional[torch.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_sample<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">early_stopping<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">typical_p<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repetition_penalty<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">bad_words_ids<span class="opacity-60">: typing.Optional[typing.Iterable[int]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">force_words_ids<span class="opacity-60">: typing.Union[typing.Iterable[int], typing.Iterable[typing.Iterable[int]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_penalty<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_repeat_ngram_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_no_repeat_ngram_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_return_sequences<span class="opacity-60">: typing.Optional[int] = None</span></span> 
</span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_time<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_new_tokens<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_start_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beam_groups<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">diversity_penalty<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prefix_allowed_tokens_fn<span class="opacity-60">: typing.Union[typing.Callable[[int, torch.Tensor], typing.List[int]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = []</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = []</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">constraints<span class="opacity-60">: typing.Optional[typing.List[transformers.generation_beam_constraints.Constraint]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_bos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">remove_invalid_values<span 
class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">exponential_decay_length_penalty<span class="opacity-60">: typing.Union[typing.Tuple[typing.Union[int, float]], NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> or <code>torch.LongTensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 
8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>torch.Tensor</code> of varying shape depending on the modality, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation or as model inputs to the encoder. If <code>None</code> the method initializes it with <code>bos_token_id</code> and a batch size of 1. For decoder-only models <code>inputs</code> should of in the format of <code>input_ids</code>. For encoder-decoder models <em>inputs</em> can represent any of <code>input_ids</code>, <code>input_values</code>, <code>input_features</code>, or <code>pixel_values</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 
56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to <code>model.config.max_length</code>) &#x2014; The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.max_new_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.max_new_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_new_tokens</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; The maximum numbers of tokens to generate, ignore the current number of tokens. 
Use either <code>max_new_tokens</code> or <code>max_length</code> but not both, they serve the same purpose.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.min_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.min_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.do_sample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.do_sample"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sampling ; use greedy decoding otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.early_stopping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.early_stopping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 
1 means no beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The value used to module the next token probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.repetition_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.repetition_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. 
See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.length_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.length_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length. 1.0 means no penalty. 
Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.no_repeat_ngram_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.no_repeat_ngram_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.encoder_no_repeat_ngram_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.GenerationMixin.generate.encoder_no_repeat_ngram_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size that occur in the <code>encoder_input_ids</code> cannot occur in the <code>decoder_input_ids</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.bad_words_ids(List[List[int]]," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.bad_words_ids(List[List[int]],"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bad_words_ids(<code>List[List[int]]</code>,</strong> <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated. In order to get the token ids of the words that should not appear in the generated text, use <code>tokenizer(bad_words, add_prefix_space=True, add_special_tokens=False).input_ids</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.force_words_ids(List[List[int]]" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.force_words_ids(List[List[int]]"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>force_words_ids(<code>List[List[int]]</code></strong> or <code>List[List[List[int]]]</code>, <em>optional</em>) &#x2014; List of token ids that must be generated. If given a <code>List[List[int]]</code>, this is treated as a simple list of words that must be included, the opposite to <code>bad_words_ids</code>. If given <code>List[List[List[int]]]</code>, this triggers a <a href="https://github.com/huggingface/transformers/issues/14081" rel="nofollow">disjunctive constraint</a>, where one can allow different forms of each word.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.num_return_sequences(int," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.num_return_sequences(int,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The 
number of independently computed returned sequences for each element in the batch.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.max_time(float," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.max_time(float,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_time(<code>float</code>,</strong> <em>optional</em>, defaults to None) &#x2014; The maximum amount of time you allow the computation to run for in seconds. 
generation will still finish the current pass after allocated time has been passed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. Mask values are in <code>[0, 1]</code>, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as <code>input_ids</code> that masks the pad token. 
<a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.decoder_start_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.decoder_start_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token. 
use_cache &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.num_beam_groups" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.num_beam_groups"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beam_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. 
<a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.diversity_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.diversity_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>diversity_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; This value is subtracted from a beam&#x2019;s score if it generates a token same as any beam from other group at a particular time. Note that <code>diversity_penalty</code> is only effective if <code>group beam search</code> is enabled. prefix_allowed_tokens_fn &#x2014; (<code>Callable[[int, torch.Tensor], List[int]]</code>, <em>optional</em>): If provided, this function constraints the beam search to allowed tokens only at each step. If not provided no constraint is applied. 
This function takes 2 arguments: the batch ID <code>batch_id</code> and <code>input_ids</code>. It has to return a list with the allowed tokens for the next generation step conditioned on the batch ID <code>batch_id</code> and the previously generated tokens <code>inputs_ids</code>. This argument is useful for constrained generation conditioned on the prefix, as described in <a href="https://arxiv.org/abs/2010.00904" rel="nofollow">Autoregressive Entity Retrieval</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; Custom logits processors that complement the default logits processors built from arguments and a model&#x2019;s config. 
If a logit processor is passed that is already created with the arguments or a model&#x2019;s config an error is thrown. This feature is intended for advanced users.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; Custom stopping criteria that complement the default stopping criteria built from arguments and a model&#x2019;s config. If a stopping criteria is passed that is already created with the arguments or a model&#x2019;s config an error is thrown. 
This feature is intended for advanced users.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.constraints" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.constraints"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>constraints</strong> (<code>List[Constraint]</code>, <em>optional</em>) &#x2014; Custom constraints that can be added to the generation to ensure that the output will contain the use of certain tokens as defined by <code>Constraint</code> objects, in the most sensible way possible.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.GenerationMixin.generate.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.forced_bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.forced_bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. 
Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.forced_eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.forced_eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.remove_invalid_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.GenerationMixin.generate.remove_invalid_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>remove_invalid_values</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to remove possible <em>nan</em> and <em>inf</em> outputs of the model to prevent the generation method to crash. 
Note that using <code>remove_invalid_values</code> can slow down generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.generate.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.generate.exponential_decay_length_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.GenerationMixin.generate.exponential_decay_length_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>exponential_decay_length_penalty</strong> (<code>tuple(int, float)</code>, <em>optional</em>) &#x2014; This Tuple adds an exponentially increasing length penalty, after a certain amount of tokens have been generated. The tuple shall consist of: <code>(start_index, decay_factor)</code> where <code>start_index</code> indicates where penalty starts and <code>decay_factor</code> represents the factor of exponential decay</p> <p>model<em>kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder</em>*.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.generation_utils.GenerationMixin.generate.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> or <code>torch.LongTensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> (if <code>return_dict_in_generate=True</code> or when <code>config.return_dict_in_generate=True</code>) or a <code>torch.FloatTensor</code>.</p> <p>If the model is <em>not</em> an encoder-decoder model (<code>model.config.is_encoder_decoder=False</code>), the possible <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput" >GreedySearchDecoderOnlyOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.SampleDecoderOnlyOutput" >SampleDecoderOnlyOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchDecoderOnlyOutput" >BeamSearchDecoderOnlyOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSampleDecoderOnlyOutput" >BeamSampleDecoderOnlyOutput</a></li> </ul> <p>If the model is an encoder-decoder model 
(<code>model.config.is_encoder_decoder=True</code>), the possible <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> types are:</p> <ul> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.GreedySearchEncoderDecoderOutput" >GreedySearchEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.SampleEncoderDecoderOutput" >SampleEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSearchEncoderDecoderOutput" >BeamSearchEncoderDecoderOutput</a>,</li> <li><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.BeamSampleEncoderDecoderOutput" >BeamSampleEncoderDecoderOutput</a></li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Generates sequences of token ids for models with a language modeling head. 
The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:</p> <ul><li><em>greedy decoding</em> by calling <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search">greedy_search()</a> if <code>num_beams=1</code> and <code>do_sample=False</code>.</li> <li><em>multinomial sampling</em> by calling <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample">sample()</a> if <code>num_beams=1</code> and <code>do_sample=True</code>.</li> <li><em>beam-search decoding</em> by calling <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search">beam_search()</a> if <code>num_beams&gt;1</code> and <code>do_sample=False</code>.</li> <li><em>beam-search multinomial sampling</em> by calling <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample">beam_sample()</a> if <code>num_beams&gt;1</code> and <code>do_sample=True</code>.</li> <li><em>diverse beam-search decoding</em> by calling <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search">group_beam_search()</a>, if <code>num_beams&gt;1</code> and <code>num_beam_groups&gt;1</code>.</li> <li><em>constrained beam-search decoding</em> by calling <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search">constrained_beam_search()</a>, if <code>constraints!=None</code> or <code>force_words_ids!=None</code>.</li></ul> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 
text-orange-700 dark:text-gray-400"><p>Apart from <code>inputs</code>, all the arguments below will default to the value of the attribute of the same name as defined in the model’s config (<code>config.json</code>) which in turn defaults to the <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> of the model.</p></div> <p>Most of these parameters are explained in more detail in <a href="https://huggingface.co/blog/how-to-generate" rel="nofollow">this blog post</a>.</p> <p>Examples:</p> <p>Greedy Decoding:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; 
</span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Today I believe we can finally&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generate up to 30 tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids, do_sample=<span class="hljs-literal">False</span>, max_length=<span class="hljs-number">30</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today I believe we can finally get to the point where we can make a difference in the lives of the people of the United States of America.\n&#x27;</span>]<!-- HTML_TAG_END --></pre></div> <p>Multinomial Sampling:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity 
bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;Today I believe we can finally&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># sample up to 30 tokens</span> <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids, do_sample=<span class="hljs-literal">True</span>, max_length=<span class="hljs-number">30</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today I believe we can finally get rid of discrimination,&quot; said Rep. 
Mark Pocan (D-Wis.).\n\n&quot;Just look at the&#x27;</span>]<!-- HTML_TAG_END --></pre></div> <p>Beam-search decoding:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;Helsinki-NLP/opus-mt-en-de&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>sentence = <span class="hljs-string">&quot;Paris is one of the densest populated areas in Europe.&quot;</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(sentence, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Paris ist eines der dichtesten besiedelten Gebiete Europas.&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.greedy_search"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>greedy_search</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.greedy_search" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.greedy_search"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L1489" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span 
class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group 
flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 
56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.greedy_search.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.greedy_search.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific keyword arguments will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>greedy decoding</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForCausalLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... 
</span> StoppingCriteriaList, <span class="hljs-meta">... </span> MaxLengthCriteria, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set pad_token_id to eos_token_id because GPT2 does not have a EOS token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>input_prompt = <span class="hljs-string">&quot;It might be possible to&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">10</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=<span class="hljs-number">20</span>)]) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.greedy_search( <span class="hljs-meta">... </span> input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&quot;It might be possible to get a better understanding of the nature of the problem, but it&#x27;s not&quot;</span>]<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.sample"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>sample</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.sample" class="header-link 
invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.sample"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L1721" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_warper<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 
my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.logits_warper" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.logits_warper"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.sample.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.sample.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.GenerationMixin.sample.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>multinomial sampling</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForCausalLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... 
</span> TopKLogitsWarper, <span class="hljs-meta">... </span> TemperatureLogitsWarper, <span class="hljs-meta">... </span> StoppingCriteriaList, <span class="hljs-meta">... </span> MaxLengthCriteria, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># set pad_token_id to eos_token_id because GPT2 does not have a EOS token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.config.pad_token_id = model.config.eos_token_id <span class="hljs-meta">&gt;&gt;&gt; </span>input_prompt = <span class="hljs-string">&quot;Today is a beautiful day, and&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_prompt, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">15</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_warper = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> TopKLogitsWarper(<span class="hljs-number">50</span>), <span class="hljs-meta">... 
</span> TemperatureLogitsWarper(<span class="hljs-number">0.7</span>), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=<span class="hljs-number">20</span>)]) <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">0</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.sample( <span class="hljs-meta">... </span> input_ids, <span class="hljs-meta">... </span> logits_processor=logits_processor, <span class="hljs-meta">... </span> logits_warper=logits_warper, <span class="hljs-meta">... </span> stopping_criteria=stopping_criteria, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Today is a beautiful day, and a wonderful day.\n\nI was lucky enough to meet the&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.beam_search"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 
13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>beam_search</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.beam_search" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.beam_search"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L1977" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_scorer<span class="opacity-60">: BeamScorer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.beam_scorer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.beam_scorer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; An derived instance of <a 
href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 
0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_search.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_search.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>beam search decoding</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... 
</span> BeamSearchScorer, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.beam_sample"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 
18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>beam_sample</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.beam_sample" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.beam_sample"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L2289" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_scorer<span class="opacity-60">: BeamScorer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_warper<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.beam_scorer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.beam_scorer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; A derived instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a 
href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.logits_warper" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.logits_warper"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 
0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.beam_sample.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.beam_sample.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>beam search multinomial sampling</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... 
</span> TopKLogitsWarper, <span class="hljs-meta">... </span> TemperatureLogitsWarper, <span class="hljs-meta">... </span> BeamSearchScorer, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> max_length=model.config.max_length, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id)] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_warper = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> TopKLogitsWarper(<span class="hljs-number">50</span>), <span class="hljs-meta">... </span> TemperatureLogitsWarper(<span class="hljs-number">0.7</span>), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.beam_sample( <span class="hljs-meta">... </span> input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.group_beam_search"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>group_beam_search</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.group_beam_search" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.generation_utils.GenerationMixin.group_beam_search"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L2611" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_scorer<span class="opacity-60">: BeamScorer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.generation_utils.GenerationMixin.group_beam_search.beam_scorer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.beam_scorer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_scorer</strong> (<code>BeamScorer</code>) &#x2014; An derived instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation. 
For more information, the documentation of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> should be read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.group_beam_search.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.group_beam_search.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3)</p> <p>model_kwargs &#x2014; Additional model specific kwargs that will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>diverse beam search decoding</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... 
</span> HammingDiversityLogitsProcessor, <span class="hljs-meta">... </span> BeamSearchScorer, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run diverse beam search using 6 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">6</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = BeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> max_length=model.config.max_length, <span class="hljs-meta">... </span> num_beams=num_beams, <span class="hljs-meta">... </span> device=model.device, <span class="hljs-meta">... </span> num_beam_groups=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> HammingDiversityLogitsProcessor(<span class="hljs-number">5.5</span>, num_beams=<span class="hljs-number">6</span>, num_beam_groups=<span class="hljs-number">3</span>), <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.group_beam_search( <span class="hljs-meta">... </span> input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt bist du?&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GenerationMixin.constrained_beam_search"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>constrained_beam_search</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GenerationMixin.constrained_beam_search" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L2976" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">constrained_beam_scorer<span class="opacity-60">: ConstrainedBeamSearchScorer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits_processor<span class="opacity-60">: typing.Optional[transformers.generation_logits_process.LogitsProcessorList] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stopping_criteria<span class="opacity-60">: typing.Optional[transformers.generation_stopping_criteria.StoppingCriteriaList] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">synced_gpus<span class="opacity-60">: 
typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group 
flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.constrained_beam_scorer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.constrained_beam_scorer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>constrained_beam_scorer</strong> (<code>ConstrainedBeamSearchScorer</code>) &#x2014; A derived instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> that defines how beam hypotheses are constructed, stored and sorted during generation, while satisfying a list of positive constraints. 
For more information, the documentation of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.ConstrainedBeamSearchScorer">ConstrainedBeamSearchScorer</a> should be read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_processor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_processor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_processor</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> used to modify the prediction scores of the language modeling head applied at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.stopping_criteria" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.stopping_criteria"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stopping_criteria</strong> (<code>StoppingCriteriaList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteriaList">StoppingCriteriaList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> used to tell if the generation loop should stop.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_warper" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.logits_warper"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits_warper</strong> (<code>LogitsProcessorList</code>, <em>optional</em>) &#x2014; An instance of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessorList">LogitsProcessorList</a>. 
List of instances of class derived from <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; <strong>DEPRECATED</strong>. Use <code>logits_processor</code> or <code>stopping_criteria</code> directly to cap the number of generated tokens. 
The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GenerationMixin.constrained_beam_search.synced_gpus" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GenerationMixin.constrained_beam_search.synced_gpus"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>synced_gpus</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to continue running the while loop until max_length (needed for ZeRO stage 3) model_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. 
If model is an encoder-decoder model the kwargs should include <code>encoder_outputs</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head using <strong>constrained beam search decoding</strong> and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ( <span class="hljs-meta">... </span> AutoTokenizer, <span class="hljs-meta">... </span> AutoModelForSeq2SeqLM, <span class="hljs-meta">... </span> LogitsProcessorList, <span class="hljs-meta">... </span> MinLengthLogitsProcessor, <span class="hljs-meta">... 
</span> ConstrainedBeamSearchScorer, <span class="hljs-meta">... </span> PhrasalConstraint, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-base&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_str = <span class="hljs-string">&quot;translate English to German: How old are you?&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>encoder_input_ids = tokenizer(encoder_input_str, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># lets run beam search using 3 beams</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_beams = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># define decoder start token ids</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.ones((num_beams, <span class="hljs-number">1</span>), device=model.device, dtype=torch.long) <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = input_ids * model.config.decoder_start_token_id <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># add encoder_outputs to model keyword arguments</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model_kwargs = { <span class="hljs-meta">... </span> <span class="hljs-string">&quot;encoder_outputs&quot;</span>: model.get_encoder()( <span class="hljs-meta">... </span> encoder_input_ids.repeat_interleave(num_beams, dim=<span class="hljs-number">0</span>), return_dict=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>constraint_str = <span class="hljs-string">&quot;sind&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>constraint_token_ids = tokenizer.encode(constraint_str)[:-<span class="hljs-number">1</span>] <span class="hljs-comment"># slice to remove eos token</span> <span class="hljs-meta">&gt;&gt;&gt; </span>constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate beam scorer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>beam_scorer = ConstrainedBeamSearchScorer( <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">1</span>, num_beams=num_beams, device=model.device, constraints=constraints <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># instantiate logits processors</span> <span class="hljs-meta">&gt;&gt;&gt; </span>logits_processor = LogitsProcessorList( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> MinLengthLogitsProcessor(<span class="hljs-number">5</span>, eos_token_id=model.config.eos_token_id), <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.constrained_beam_search( <span class="hljs-meta">... </span> input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, skip_special_tokens=<span class="hljs-literal">True</span>) [<span class="hljs-string">&#x27;Wie alt sind Sie?&#x27;</span>]<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.generation_tf_utils.TFGenerationMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFGenerationMixn </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_tf_utils.TFGenerationMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" 
height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_tf_utils.</span><span class="font-semibold">TFGenerationMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_tf_utils.TFGenerationMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_tf_utils.TFGenerationMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_utils.py#L342" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A class containing all of the functions supporting generation, to be used as a mixin in <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_tf_utils.TFGenerationMixin.generate"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>generate</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_tf_utils.TFGenerationMixin.generate" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_tf_utils.TFGenerationMixin.generate"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_utils.py#L362" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60"> = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_sample<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">early_stopping<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repetition_penalty<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bad_words_ids<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_penalty<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_repeat_ngram_size<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_return_sequences<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_start_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_cache<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_scores<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_attentions<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_hidden_states<span class="opacity-60"> = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict_in_generate<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_bos_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_eos_token_id<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-default"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> or <code>tf.Tensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>, `(batch_size, sequence_length, &#x2014;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.feature_dim)`" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.feature_dim)`"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>feature_dim)`</strong> or <code>(batch_size, num_channels, height, width)</code>, <em>optional</em>) &#x2014; The sequence used as a prompt for the generation or as model inputs to the encoder. If <code>None</code> the method initializes it with <code>bos_token_id</code> and a batch size of 1. For decoder-only models <code>inputs</code> should of in the format of <code>input_ids</code>. For encoder-decoder models <em>inputs</em> can represent any of <code>input_ids</code>, <code>input_values</code>, <code>input_features</code>, or <code>pixel_values</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> 
</span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.min_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.min_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.do_sample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.do_sample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sampling ; use greedy decoding otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.early_stopping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.early_stopping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 
1 means no beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The value used to module the next token probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.repetition_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.repetition_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. 
See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.length_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.length_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length. 
1.0 means no penalty.</p> <p>Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.no_repeat_ngram_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.no_repeat_ngram_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to int &gt; 0, all ngrams of that size can only occur once.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.bad_words_ids(List[int]," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.bad_words_ids(List[int],"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bad_words_ids(<code>List[int]</code>,</strong> <em>optional</em>) &#x2014; List of token ids that are not allowed to be generated. 
In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.num_return_sequences(int," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.num_return_sequences(int,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_return_sequences(<code>int</code>,</strong> <em>optional</em>, defaults to 1) &#x2014; The number of independently computed returned sequences for each element in the batch.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>tf.Tensor</code> of <code>dtype=tf.int32</code> and shape <code>(batch_size, sequence_length)</code>, <em>optional</em>) &#x2014; Mask to avoid performing attention on padding token indices. 
Mask values are in <code>[0, 1]</code>, 1 for tokens that are not masked, and 0 for masked tokens.</p> <p>If not provided, will default to a tensor the same shape as <code>input_ids</code> that masks the pad token.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.decoder_start_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.decoder_start_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token. 
use_cache &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the attentions tensors of all attention layers. 
See <code>attentions</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the hidden states of all layers. 
See <code>hidden_states</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.output_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.output_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the prediction scores. 
See <code>scores</code> under returned tensors for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.return_dict_in_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.return_dict_in_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.forced_bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.forced_bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. 
Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_tf_utils.TFGenerationMixin.generate.forced_eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_tf_utils.TFGenerationMixin.generate.forced_eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached. 
model_specific_kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.generation_tf_utils.TFGenerationMixin.generate.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> or <code>tf.Tensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --><!-- HTML_TAG_END --></p></div></div> <p>Generates sequences for models with a language modeling head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.</p> <p>Adapted in part from <a href="https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529" rel="nofollow">Facebook’s XLM beam search code</a>.</p> <p>Apart from <code>input_ids</code> and <code>attention_mask</code>, all the arguments below will default to the value of the attribute of the same name inside the <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> of the model. 
The default values indicated are the default values of those config.</p> <p>Most of these parameters are explained in more detail in <a href="https://huggingface.co/blog/how-to-generate" rel="nofollow">this blog post</a>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;distilgpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> outputs = model.generate(max_length=<span class="hljs-number">40</span>) <span class="hljs-comment"># do greedy decoding</span> <span class="hljs-built_in">print</span>(<span 
class="hljs-string">f&quot;Generated: <span class="hljs-subst">{tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;openai-gpt&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;openai-gpt&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;The dog&quot;</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, num_beams=<span class="hljs-number">5</span>, num_return_sequences=<span class="hljs-number">3</span>, temperature=<span class="hljs-number">1.5</span> ) <span class="hljs-comment"># generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context &#x27;The dog&#x27;</span> <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">3</span>): <span class="hljs-comment"># 3 output sequences were generated</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated <span class="hljs-subst">{i}</span>: <span class="hljs-subst">{tokenizer.decode(outputs[i], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;distilgpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and 
cache.</span> input_context = <span class="hljs-string">&quot;The dog&quot;</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">40</span>, temperature=<span class="hljs-number">0.7</span>, num_return_sequences=<span class="hljs-number">3</span>, do_sample=<span class="hljs-literal">True</span> ) <span class="hljs-comment"># generate 3 candidates using sampling</span> <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">3</span>): <span class="hljs-comment"># 3 output sequences were generated</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated <span class="hljs-subst">{i}</span>: <span class="hljs-subst">{tokenizer.decode(outputs[i], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;ctrl&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;ctrl&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;Legal My neighbor is&quot;</span> <span class="hljs-comment"># &quot;Legal&quot; is one of the control codes for ctrl</span> input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">50</span>, temperature=<span class="hljs-number">0.7</span>, repetition_penalty=<span class="hljs-number">1.2</span> ) <span class="hljs-comment"># generate 
sequences</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Generated: <span class="hljs-subst">{tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>)}</span>&quot;</span>) tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) <span class="hljs-comment"># Initialize tokenizer</span> model = TFAutoModelWithLMHead.from_pretrained( <span class="hljs-string">&quot;gpt2&quot;</span> ) <span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> input_context = <span class="hljs-string">&quot;My cute dog&quot;</span> bad_words_ids = [ tokenizer.encode(bad_word, add_prefix_space=<span class="hljs-literal">True</span>) <span class="hljs-keyword">for</span> bad_word <span class="hljs-keyword">in</span> [<span class="hljs-string">&quot;idiot&quot;</span>, <span class="hljs-string">&quot;stupid&quot;</span>, <span class="hljs-string">&quot;shut up&quot;</span>] ] input_ids = tokenizer.encode(input_context, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>) <span class="hljs-comment"># encode input context</span> outputs = model.generate( input_ids=input_ids, max_length=<span class="hljs-number">100</span>, do_sample=<span class="hljs-literal">True</span>, bad_words_ids=bad_words_ids ) <span class="hljs-comment"># generate sequences without allowing bad_words to be generated</span><!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.generation_flax_utils.FlaxGenerationMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxGenerationMixn </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_flax_utils.FlaxGenerationMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span 
class="font-medium">transformers.generation_flax_utils.</span><span class="font-semibold">FlaxGenerationMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_flax_utils.FlaxGenerationMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_flax_utils.FlaxGenerationMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_utils.py#L119" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A class containing all functions for auto-regressive text generation, to be used as a mixin in <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a>.</p> <p>The class exposes <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin.generate">generate()</a>, which can be used for:</p> 
<ul><li><em>greedy decoding</em> by calling <code>_greedy_search()</code>if <code>num_beams=1</code> and <code>do_sample=False</code>.</li> <li><em>multinomial sampling</em> by calling <code>_sample()</code>if <code>num_beams=1</code> and <code>do_sample=True</code>.</li> <li><em>beam-search decoding</em> by calling <code>_beam_search</code> if <code>num_beams&gt;1</code> and <code>do_sample=False</code>.</li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_flax_utils.FlaxGenerationMixin.generate"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>generate</span></h4><!-- HTML_TAG_END --> <a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_utils.py#L163" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span 
class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_start_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_sample<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prng_key<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span 
class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_repeat_ngram_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_bos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forced_eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_penalty<span class="opacity-60">: typing.Optional[float] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">early_stopping<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trace<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: typing.Union[typing.Dict[str, jax._src.numpy.lax_numpy.ndarray], NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**model_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The sequence used as a prompt for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) &#x2014; The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.do_sample" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.do_sample"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to use sampling ; use greedy decoding otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The value used to module the next token 
probabilities.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If set to float &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>beginning-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of beams for beam search. 1 means no beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.decoder_start_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.decoder_start_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; If an encoder-decoder model starts decoding with a different 
token than <em>bos</em>, the id of that token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.trace" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.trace"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>trace</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to trace generation. 
Setting <code>trace=False</code> should only be used for debugging and will lead to a considerably slower runtime.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGenerationMixin.generate.params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGenerationMixin.generate.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Dict[str, jnp.ndarray]</code>, <em>optional</em>) &#x2014; Optionally the model parameters can be passed. Can be useful for parallelized generation. model<em>kwargs &#x2014; Additional model specific kwargs will be forwarded to the <code>forward</code> function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder</em>*. 
Also accepts <code>encoder_outputs</code> to skip encoder part.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generates sequences of token ids for models with a language modeling head. The method supports the following generation methods for text-decoder, text-to-text, speech-to-text, and vision-to-text models:</p> <ul><li><em>greedy decoding</em> by calling <code>_greedy_search()</code>if <code>num_beams=1</code> and <code>do_sample=False</code>.</li> <li><em>multinomial sampling</em> by calling <code>_sample()</code>if <code>num_beams=1</code> and <code>do_sample=True</code>.</li> <li><em>beam-search decoding</em> by calling <code>_beam_search</code> if <code>num_beams&gt;1</code> and <code>do_sample=False</code>.</li></ul> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>Apart from <code>inputs</code>, all the arguments below will default to the value of the attribute of the same name as defined in the model’s config (<code>config.json</code>) which in turn defaults to the <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> of the model.</p></div> <p>Most of these parameters are explained in more detail in <a href="https://huggingface.co/blog/how-to-generate" rel="nofollow">this blog post</a>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, FlaxAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>input_context = <span class="hljs-string">&quot;The dog&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># encode input context</span> <span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = tokenizer(input_context, return_tensors=<span class="hljs-string">&quot;np&quot;</span>).input_ids <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># generate candidates using sampling</span> <span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model.generate(input_ids=input_ids, max_length=<span class="hljs-number">20</span>, top_k=<span class="hljs-number">30</span>, do_sample=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(outputs, 
skip_special_tokens=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="juymav"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="juymav"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/text_generation.mdx-5e23a84f.js") ], params: {} } }); </script>
427
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/pipelines.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;pipelines&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.pipeline&quot;,&quot;title&quot;:&quot;The pipeline abstraction&quot;},{&quot;local&quot;:&quot;pipeline-batching&quot;,&quot;title&quot;:&quot;Pipeline batching&quot;},{&quot;local&quot;:&quot;pipeline-chunk-batching&quot;,&quot;title&quot;:&quot;Pipeline chunk batching&quot;},{&quot;local&quot;:&quot;pipeline-custom-code&quot;,&quot;title&quot;:&quot;Pipeline custom code&quot;},{&quot;local&quot;:&quot;implementing-a-pipeline&quot;,&quot;title&quot;:&quot;Implementing a pipeline&quot;},{&quot;local&quot;:&quot;the-task-specific-pipelines&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.AudioClassificationPipeline&quot;,&quot;title&quot;:&quot;AudioClassificationPipeline&quot;},{&quot;local&quot;:&quot;transformers.AutomaticSpeechRecognitionPipeline&quot;,&quot;title&quot;:&quot;AutomaticSpeechRecognitionPipeline&quot;},{&quot;local&quot;:&quot;transformers.Conversation&quot;,&quot;title&quot;:&quot;ConversationalPipeline&quot;},{&quot;local&quot;:&quot;transformers.FeatureExtractionPipeline&quot;,&quot;title&quot;:&quot;FeatureExtractionPipeline&quot;},{&quot;local&quot;:&quot;transformers.FillMaskPipeline&quot;,&quot;title&quot;:&quot;FillMaskPipeline&quot;},{&quot;local&quot;:&quot;transformers.ImageClassificationPipeline&quot;,&quot;title&quot;:&quot;ImageClassificationPipeline&quot;},{&quot;local&quot;:&quot;transformers.ImageSegmentationPipeline&quot;,&quot;title&quot;:&quot;ImageSegmentationPipeline&quot;},{&quot;local&quot;:&quot;transformers.TokenClassificationPipeline&quot;,&quot;title&quot;:&quot;NerPipeline&quot;},{&quot;local&quot;:&quot;transformers.ObjectDetectionPipeline&quot;,&quot;title&quot;:&quot;ObjectDetectionPipeline&quot;},{&quot;local&quot;:&quot;transformers.QuestionAnsweringPipeline&quot;,&quot;title&quot;:&q
uot;QuestionAnsweringPipeline&quot;},{&quot;local&quot;:&quot;transformers.SummarizationPipeline&quot;,&quot;title&quot;:&quot;SummarizationPipeline&quot;},{&quot;local&quot;:&quot;transformers.TableQuestionAnsweringPipeline&quot;,&quot;title&quot;:&quot;TableQuestionAnsweringPipeline&quot;},{&quot;local&quot;:&quot;transformers.TextClassificationPipeline&quot;,&quot;title&quot;:&quot;TextClassificationPipeline&quot;},{&quot;local&quot;:&quot;transformers.TextGenerationPipeline&quot;,&quot;title&quot;:&quot;TextGenerationPipeline&quot;},{&quot;local&quot;:&quot;transformers.Text2TextGenerationPipeline&quot;,&quot;title&quot;:&quot;Text2TextGenerationPipeline&quot;},{&quot;local&quot;:&quot;transformers.TokenClassificationPipeline&quot;,&quot;title&quot;:&quot;TokenClassificationPipeline&quot;},{&quot;local&quot;:&quot;transformers.TranslationPipeline&quot;,&quot;title&quot;:&quot;TranslationPipeline&quot;},{&quot;local&quot;:&quot;transformers.ZeroShotClassificationPipeline&quot;,&quot;title&quot;:&quot;ZeroShotClassificationPipeline&quot;},{&quot;local&quot;:&quot;transformers.ZeroShotImageClassificationPipeline&quot;,&quot;title&quot;:&quot;ZeroShotImageClassificationPipeline&quot;}],&quot;title&quot;:&quot;The task specific pipelines&quot;},{&quot;local&quot;:&quot;transformers.Pipeline&quot;,&quot;title&quot;:&quot;Parent class: `Pipeline`&quot;}],&quot;title&quot;:&quot;Pipelines&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" 
href="/docs/transformers/pr_16143/en/_app/pages/main_classes/pipelines.mdx-dd4bd714.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="pipelines" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipelines"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipelines </span></h1> <p>The pipelines are a great and easy way to use models for inference. These pipelines are objects that abstract most of the complex code from the library, offering a simple API dedicated to several tasks, including Named Entity Recognition, Masked Language Modeling, Sentiment Analysis, Feature Extraction and Question Answering. 
See the <a href="../task_summary">task summary</a> for examples of use.</p> <p>There are two categories of pipeline abstractions to be aware about:</p> <ul><li><p>The <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> which is the most powerful object encapsulating all other pipelines.</p></li> <li><p>The other task-specific pipelines:</p> <ul><li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AudioClassificationPipeline">AudioClassificationPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline">AutomaticSpeechRecognitionPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ConversationalPipeline">ConversationalPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.FeatureExtractionPipeline">FeatureExtractionPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.FillMaskPipeline">FillMaskPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ImageClassificationPipeline">ImageClassificationPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ImageSegmentationPipeline">ImageSegmentationPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ObjectDetectionPipeline">ObjectDetectionPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.QuestionAnsweringPipeline">QuestionAnsweringPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.SummarizationPipeline">SummarizationPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TableQuestionAnsweringPipeline">TableQuestionAnsweringPipeline</a></li> <li><a 
href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TextClassificationPipeline">TextClassificationPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TextGenerationPipeline">TextGenerationPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Text2TextGenerationPipeline">Text2TextGenerationPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TokenClassificationPipeline">TokenClassificationPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TranslationPipeline">TranslationPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline">ZeroShotClassificationPipeline</a></li> <li><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ZeroShotImageClassificationPipeline">ZeroShotImageClassificationPipeline</a></li></ul></li></ul> <h2 class="relative group"><a id="transformers.pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span>The pipeline abstraction </span></h2> <p>The <em>pipeline</em> abstraction is a wrapper around all the other available pipelines. It is instantiated as any other pipeline but can provide additional quality of life.</p> <p>Simple call on one item:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe(<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}]<!-- HTML_TAG_END 
--></pre></div> <p>If you want to use a specific model from the <a href="https://huggingface.co" rel="nofollow">hub</a> you can ignore the task if the model on the hub already defines it:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(model=<span class="hljs-string">&quot;roberta-large-mnli&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe(<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}]<!-- HTML_TAG_END --></pre></div> <p>To call a pipeline on many items, you can either call with a <em>list</em>.</p> <div class="code-block 
relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipe([<span class="hljs-string">&quot;This restaurant is awesome&quot;</span>, <span class="hljs-string">&quot;This restaurant is aweful&quot;</span>]) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998743534088135</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;NEGATIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9996669292449951</span>}]<!-- HTML_TAG_END --></pre></div> <p>To iterate of full datasets it is 
recommended to use a <code>dataset</code> directly. This means you don’t need to allocate the whole dataset at once, nor do you need to do batching yourself. This should work just as fast as custom loops on GPU. If it doesn’t don’t hesitate to create an issue.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> datasets <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> transformers.pipelines.pt_utils <span class="hljs-keyword">import</span> KeyDataset <span class="hljs-keyword">from</span> tqdm.auto <span class="hljs-keyword">import</span> tqdm pipe = pipeline(<span class="hljs-string">&quot;automatic-speech-recognition&quot;</span>, model=<span 
class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>, device=<span class="hljs-number">0</span>) dataset = datasets.load_dataset(<span class="hljs-string">&quot;superb&quot;</span>, name=<span class="hljs-string">&quot;asr&quot;</span>, split=<span class="hljs-string">&quot;test&quot;</span>) <span class="hljs-comment"># KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item</span> <span class="hljs-comment"># as we&#x27;re not interested in the *target* part of the dataset.</span> <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(KeyDataset(dataset, <span class="hljs-string">&quot;file&quot;</span>))): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># {&quot;text&quot;: &quot;NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND&quot;}</span> <span class="hljs-comment"># {&quot;text&quot;: ....}</span> <span class="hljs-comment"># ....</span><!-- HTML_TAG_END --></pre></div> <p>For ease of use, a generator is also possible:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 
translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>) <span class="hljs-keyword">def</span> <span class="hljs-title function_">data</span>(): <span class="hljs-keyword">while</span> <span class="hljs-literal">True</span>: <span class="hljs-comment"># This could come from a dataset, a database, a queue or HTTP request</span> <span class="hljs-comment"># in a server</span> <span class="hljs-comment"># Caveat: because this is iterative, you cannot use `num_workers &gt; 1` variable</span> <span class="hljs-comment"># to use multiple threads to preprocess data. You can still have 1 thread that</span> <span class="hljs-comment"># does the preprocessing while the main runs the big inference</span> <span class="hljs-keyword">yield</span> <span class="hljs-string">&quot;This is a test&quot;</span> <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> pipe(data()): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># {&quot;text&quot;: &quot;NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND&quot;}</span> <span class="hljs-comment"># {&quot;text&quot;: ....}</span> <span class="hljs-comment"># ....</span><!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.pipeline"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium 
px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.pipeline</span></h4><!-- HTML_TAG_END --> <a id="transformers.pipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines.py#L373" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Optional = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: typing.Union[str, transformers.configuration_utils.PretrainedConfig, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: typing.Union[str, transformers.tokenization_utils.PreTrainedTokenizer, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature_extractor<span class="opacity-60">: typing.Union[str, ForwardRef(&#39;SequenceFeatureExtractor&#39;), NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">revision<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_fast<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[str, bool, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_kwargs<span class="opacity-60">: typing.Dict[str, typing.Any] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pipeline_class<span class="opacity-60">: typing.Optional[typing.Any] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Pipeline" >Pipeline</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.pipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>) &#x2014; The task defining which pipeline will be returned. Currently accepted tasks are:</p> <ul> <li><code>&quot;audio-classification&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AudioClassificationPipeline">AudioClassificationPipeline</a>.</li> <li><code>&quot;automatic-speech-recognition&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline">AutomaticSpeechRecognitionPipeline</a>.</li> <li><code>&quot;conversational&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ConversationalPipeline">ConversationalPipeline</a>.</li> <li><code>&quot;feature-extraction&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.FeatureExtractionPipeline">FeatureExtractionPipeline</a>.</li> <li><code>&quot;fill-mask&quot;</code>: will return a <a 
href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.FillMaskPipeline">FillMaskPipeline</a>:.</li> <li><code>&quot;image-classification&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ImageClassificationPipeline">ImageClassificationPipeline</a>.</li> <li><code>&quot;question-answering&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.QuestionAnsweringPipeline">QuestionAnsweringPipeline</a>.</li> <li><code>&quot;table-question-answering&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TableQuestionAnsweringPipeline">TableQuestionAnsweringPipeline</a>.</li> <li><code>&quot;text2text-generation&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Text2TextGenerationPipeline">Text2TextGenerationPipeline</a>.</li> <li><code>&quot;text-classification&quot;</code> (alias <code>&quot;sentiment-analysis&quot;</code> available): will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TextClassificationPipeline">TextClassificationPipeline</a>.</li> <li><code>&quot;text-generation&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TextGenerationPipeline">TextGenerationPipeline</a>:.</li> <li><code>&quot;token-classification&quot;</code> (alias <code>&quot;ner&quot;</code> available): will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TokenClassificationPipeline">TokenClassificationPipeline</a>.</li> <li><code>&quot;translation&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TranslationPipeline">TranslationPipeline</a>.</li> <li><code>&quot;translation_xx_to_yy&quot;</code>: will return a <a 
href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TranslationPipeline">TranslationPipeline</a>.</li> <li><code>&quot;summarization&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.SummarizationPipeline">SummarizationPipeline</a>.</li> <li><code>&quot;zero-shot-classification&quot;</code>: will return a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline">ZeroShotClassificationPipeline</a>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>, <em>optional</em>) 
&#x2014; The model that will be used by the pipeline to make predictions. This can be a model identifier or an actual instance of a pretrained model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> (for PyTorch) or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> (for TensorFlow).</p> <p>If not provided, the default for the <code>task</code> will be loaded.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>, <em>optional</em>) &#x2014; The configuration that will be used by the pipeline to instantiate the model. 
This can be a model identifier or an actual pretrained model configuration inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>.</p> <p>If not provided, the default configuration file for the requested model will be used. That means that if <code>model</code> is given, its default configuration will be used. However, if <code>model</code> is not supplied, this <code>task</code>&#x2019;s default model&#x2019;s config is used instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>, <em>optional</em>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This can be a model identifier or an actual pretrained tokenizer inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.</p> <p>If not provided, the default tokenizer for the given <code>model</code> will be loaded (if it is a string). If <code>model</code> is not specified or not a string, then the default tokenizer for <code>config</code> is loaded (if it is a string). However, if <code>config</code> is also not given or not a string, then the default tokenizer for the given <code>task</code> will be loaded.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.feature_extractor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.feature_extractor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feature_extractor</strong> (<code>str</code> or <code>PreTrainedFeatureExtractor</code>, <em>optional</em>) &#x2014; The feature extractor that will be used by the pipeline to encode data for the 
model. This can be a model identifier or an actual pretrained feature extractor inheriting from <code>PreTrainedFeatureExtractor</code>.</p> <p>Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal models. Multi-modal models will also require a tokenizer to be passed.</p> <p>If not provided, the default feature extractor for the given <code>model</code> will be loaded (if it is a string). If <code>model</code> is not specified or not a string, then the default feature extractor for <code>config</code> is loaded (if it is a string). However, if <code>config</code> is also not given or not a string, then the default feature extractor for the given <code>task</code> will be loaded.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The 
framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.revision" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.revision"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; When passing a task name or a string model identifier: The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.use_fast" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipeline.use_fast"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_fast</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to use a Fast tokenizer if possible (a <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.pipeline.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.pipeline.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). model_kwargs &#x2014; Additional dictionary of keyword arguments passed along to the model&#x2019;s <code>from_pretrained(..., **model_kwargs)</code> function. 
kwargs &#x2014; Additional keyword arguments passed along to the specific pipeline init (see the documentation for the corresponding pipeline class for possible values).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.pipeline.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Pipeline" >Pipeline</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A suitable pipeline for the task.</p> <!-- HTML_TAG_END --></p></div></div> <p>Utility factory method to build a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Pipeline">Pipeline</a>.</p> <p>Pipelines are made of:</p> <ul><li>A <a href="tokenizer">tokenizer</a> in charge of mapping raw textual input to token.</li> <li>A <a href="model">model</a> to make predictions from the inputs.</li> <li>Some (optional) post processing for enhancing model’s output.</li></ul> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black 
text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline, AutoModelForTokenClassification, AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Sentiment analysis pipeline</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Question answering pipeline, specifying the checkpoint identifier</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;question-answering&quot;</span>, model=<span class="hljs-string">&quot;distilbert-base-cased-distilled-squad&quot;</span>, tokenizer=<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Named entity recognition pipeline, passing in a specific model and tokenizer</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;dbmdz/bert-large-cased-finetuned-conll03-english&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pipeline(<span class="hljs-string">&quot;ner&quot;</span>, model=model, tokenizer=tokenizer)<!-- HTML_TAG_END --></pre></div></div> <h2 class="relative group"><a id="pipeline-batching" class="header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline-batching"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline batching </span></h2> <p>All pipelines can use batching. This will work whenever the pipeline uses its streaming ability (so when passing lists or <code>Dataset</code> or <code>generator</code>).</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight 
rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> transformers.pipelines.pt_utils <span class="hljs-keyword">import</span> KeyDataset <span class="hljs-keyword">import</span> datasets dataset = datasets.load_dataset(<span class="hljs-string">&quot;imdb&quot;</span>, name=<span class="hljs-string">&quot;plain_text&quot;</span>, split=<span class="hljs-string">&quot;unsupervised&quot;</span>) pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>, device=<span class="hljs-number">0</span>) <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> pipe(KeyDataset(dataset, <span class="hljs-string">&quot;text&quot;</span>), batch_size=<span class="hljs-number">8</span>, truncation=<span class="hljs-string">&quot;only_first&quot;</span>): <span class="hljs-built_in">print</span>(out) <span class="hljs-comment"># [{&#x27;label&#x27;: &#x27;POSITIVE&#x27;, &#x27;score&#x27;: 0.9998743534088135}]</span> <span class="hljs-comment"># Exactly the same output as before, but the content are passed</span> <span class="hljs-comment"># as batches to the model</span><!-- HTML_TAG_END --></pre></div> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>However, this is not automatically a win for performance. 
It can be either a 10x speedup or 5x slowdown depending on hardware, data and the actual model being used.</p> <p>Example where it’s mostly a speedup:</p></div> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-keyword">from</span> torch.utils.data <span class="hljs-keyword">import</span> Dataset <span class="hljs-keyword">from</span> tqdm.auto <span class="hljs-keyword">import</span> tqdm pipe = pipeline(<span class="hljs-string">&quot;text-classification&quot;</span>, device=<span class="hljs-number">0</span>) <span class="hljs-keyword">class</span> <span class="hljs-title class_">MyDataset</span>(<span class="hljs-title class_ inherited__">Dataset</span>): <span class="hljs-keyword">def</span> <span class="hljs-title 
function_">__len__</span>(<span class="hljs-params">self</span>): <span class="hljs-keyword">return</span> <span class="hljs-number">5000</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__getitem__</span>(<span class="hljs-params">self, i</span>): <span class="hljs-keyword">return</span> <span class="hljs-string">&quot;This is a test&quot;</span> dataset = MyDataset() <span class="hljs-keyword">for</span> batch_size <span class="hljs-keyword">in</span> [<span class="hljs-number">1</span>, <span class="hljs-number">8</span>, <span class="hljs-number">64</span>, <span class="hljs-number">256</span>]: <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;-&quot;</span> * <span class="hljs-number">30</span>) <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;Streaming batch_size=<span class="hljs-subst">{batch_size}</span>&quot;</span>) <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(dataset, batch_size=batch_size), total=<span class="hljs-built_in">len</span>(dataset)): <span class="hljs-keyword">pass</span><!-- HTML_TAG_END --></pre></div> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white 
py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-section"># On GTX 970 ------------------------------</span> Streaming no batching 100%|██████████████████████████████████████████████████████████████████████| 5000/5000 [00:26&lt;00:00, 187.52it/s] <span class="hljs-code">------------------------------ Streaming batch_size=8 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:04&lt;00:00, 1205.95it/s] ------------------------------</span> Streaming batch<span class="hljs-emphasis">_size=64 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:02&lt;00:00, 2478.24it/s] ------------------------------ Streaming batch_</span>size=256 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:01&lt;00:00, 2554.43it/s] (diminishing returns, saturated the GPU)<!-- HTML_TAG_END --></pre></div> <p>Example where it’s most a slowdown:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" 
width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">class</span> <span class="hljs-title class_">MyDataset</span>(<span class="hljs-title class_ inherited__">Dataset</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">__len__</span>(<span class="hljs-params">self</span>): <span class="hljs-keyword">return</span> <span class="hljs-number">5000</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__getitem__</span>(<span class="hljs-params">self, i</span>): <span class="hljs-keyword">if</span> i % <span class="hljs-number">64</span> == <span class="hljs-number">0</span>: n = <span class="hljs-number">100</span> <span class="hljs-keyword">else</span>: n = <span class="hljs-number">1</span> <span class="hljs-keyword">return</span> <span class="hljs-string">&quot;This is a test&quot;</span> * n<!-- HTML_TAG_END --></pre></div> <p>This is a occasional very long sentence compared to the other. In that case, the <strong>whole</strong> batch will need to be 400 tokens long, so the whole batch will be [64, 400] instead of [64, 4], leading to the high slowdown. 
Even worse, on bigger batches, the program simply crashes.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment">------------------------------</span> Streaming no batching <span class="hljs-number">100</span>%|█████████████████████████████████████████████████████████████████████| <span class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">05</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">183.69</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">8</span> <span class="hljs-number">100</span>%|█████████████████████████████████████████████████████████████████████| <span 
class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">03</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">265.74</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">64</span> <span class="hljs-number">100</span>%|██████████████████████████████████████████████████████████████████████| <span class="hljs-number">1000</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">26</span>&lt;<span class="hljs-number">00</span>:<span class="hljs-number">00</span>, <span class="hljs-number">37.80</span><span class="hljs-keyword">it</span>/s] <span class="hljs-comment">------------------------------</span> Streaming batch_size=<span class="hljs-number">256</span> <span class="hljs-number">0</span>%| | <span class="hljs-number">0</span>/<span class="hljs-number">1000</span> [<span class="hljs-number">00</span>:<span class="hljs-number">00</span><span class="hljs-meta">&lt;?</span>, ?<span class="hljs-keyword">it</span>/s] Traceback (most recent call <span class="hljs-keyword">last</span>): File <span class="hljs-string">&quot;/home/nicolas/src/transformers/test.py&quot;</span>, <span class="hljs-built_in">line</span> <span class="hljs-number">42</span>, <span class="hljs-keyword">in</span> &lt;module&gt; <span class="hljs-keyword">for</span> out <span class="hljs-keyword">in</span> tqdm(pipe(dataset, batch_size=<span class="hljs-number">256</span>), total=<span class="hljs-built_in">len</span>(dataset)): .... q = q / math.<span class="hljs-built_in">sqrt</span>(dim_per_head) <span class="hljs-comment"># (bs, n_heads, q_length, dim_per_head)</span> RuntimeError: CUDA out <span class="hljs-keyword">of</span> memory. 
Tried <span class="hljs-built_in">to</span> allocate <span class="hljs-number">376.00</span> MiB (GPU <span class="hljs-number">0</span>; <span class="hljs-number">3.95</span> GiB total capacity; <span class="hljs-number">1.72</span> GiB already allocated; <span class="hljs-number">354.88</span> MiB free; <span class="hljs-number">2.46</span> GiB reserved <span class="hljs-keyword">in</span> total <span class="hljs-keyword">by</span> PyTorch)<!-- HTML_TAG_END --></pre></div> <p>There are no good (general) solutions for this problem, and your mileage may vary depending on your use cases. Rule of thumb:</p> <p>For users, a rule of thumb is:</p> <ul><li><p><strong>Measure performance on your load, with your hardware. Measure, measure, and keep measuring. Real numbers are the only way to go.</strong></p></li> <li><p>If you are latency constrained (live product doing inference), don’t batch</p></li> <li><p>If you are using CPU, don’t batch.</p></li> <li><p>If you are using throughput (you want to run your model on a bunch of static data), on GPU, then:</p> <ul><li>If you have no clue about the size of the sequence_length (“natural” data), by default don’t batch, measure and try tentatively to add it, add OOM checks to recover when it will fail (and it will at some point if you don’t control the sequence_length.)</li> <li>If your sequence_length is super regular, then batching is more likely to be VERY interesting, measure and push it until you get OOMs.</li> <li>The larger the GPU the more likely batching is going to be more interesting</li></ul></li> <li><p>As soon as you enable batching, make sure you can handle OOMs nicely.</p></li></ul> <h2 class="relative group"><a id="pipeline-chunk-batching" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline-chunk-batching"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline chunk batching </span></h2> <p><code>zero-shot-classification</code> and <code>question-answering</code> are slightly specific in the sense, that a single input might yield multiple forward pass of a model. Under normal circumstances, this would yield issues with <code>batch_size</code> argument.</p> <p>In order to circumvent this issue, both of these pipelines are a bit specific, they are <code>ChunkPipeline</code> instead of regular <code>Pipeline</code>. 
In short:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->preprocessed = pipe.preprocess(inputs) model_outputs = pipe.forward(preprocessed) outputs = pipe.postprocess(model_outputs)<!-- HTML_TAG_END --></pre></div> <p>Now becomes:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->all_model_outputs = [] <span class="hljs-keyword">for</span> preprocessed <span class="hljs-keyword">in</span> pipe.preprocess(inputs): model_outputs = pipe.forward(preprocessed) all_model_outputs.append(model_outputs) outputs = pipe.postprocess(all_model_outputs)<!-- HTML_TAG_END --></pre></div> <p>This should be very transparent to your code because the pipelines are used in the same way.</p> <p>This is a simplified view, since the pipeline can handle automatically the batch to ! Meaning you don’t have to care about how many forward passes you inputs are actually going to trigger, you can optimize the <code>batch_size</code> independently of the inputs. 
The caveats from the previous section still apply.</p> <h2 class="relative group"><a id="pipeline-custom-code" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline-custom-code"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline custom code </span></h2> <p>If you want to override a specific pipeline.</p> <p>Don’t hesitate to create an issue for your task at hand, the goal of the pipeline is to be easy to use and support most cases, so <code>transformers</code> could maybe support your use case.</p> <p>If you want to try simply you can:</p> <ul><li>Subclass your pipeline of choice</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">class</span> <span class="hljs-title class_">MyPipeline</span>(<span class="hljs-title class_ inherited__">TextClassificationPipeline</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">postprocess</span>(): <span class="hljs-comment"># Your code goes here</span> scores = scores * <span class="hljs-number">100</span> <span class="hljs-comment"># And here</span> my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...) 
<span class="hljs-comment"># or if you use *pipeline* function, then:</span> my_pipeline = pipeline(model=<span class="hljs-string">&quot;xxxx&quot;</span>, pipeline_class=MyPipeline)<!-- HTML_TAG_END --></pre></div> <p>That should enable you to do all the custom code you want.</p> <h2 class="relative group"><a id="implementing-a-pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#implementing-a-pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Implementing a pipeline </span></h2> <p><a href="../add_new_pipeline">Implementing a new pipeline</a></p> <h2 class="relative group"><a id="the-task-specific-pipelines" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#the-task-specific-pipelines"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>The task specific pipelines </span></h2> <h3 class="relative group"><a id="transformers.AudioClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AudioClassificationPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AudioClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white 
dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">AudioClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.AudioClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AudioClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto 
!text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/audio_classification.py#L67" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 
40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.framework" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, 
<em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Audio classification pipeline using any <code>AutoModelForAudioClassification</code>. This pipeline predicts the class of a raw waveform or an audio file. 
In case of an audio file, ffmpeg should be installed to support multiple audio formats.</p> <p>This pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;audio-classification&quot;</code>.</p> <p>See the list of available models on <a href="https://huggingface.co/models?filter=audio-classification" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AudioClassificationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.AudioClassificationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AudioClassificationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/audio_classification.py#L90" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Union[numpy.ndarray, bytes, str]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list of <code>dict</code> with the 
following keys</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.__call__.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.__call__.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>np.ndarray</code> or <code>bytes</code> or <code>str</code>) &#x2014; The inputs is either a raw waveform (<code>np.ndarray</code> of shape (n, ) of type <code>np.float32</code> or <code>np.float64</code>) at the correct sampling rate (no further check will be done) or a <code>str</code> that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using <em>ffmpeg</em>. This requires <em>ffmpeg</em> to be installed on the system. 
If <em>inputs</em> is <code>bytes</code> it is supposed to be the content of an audio file and is interpreted by <em>ffmpeg</em> in the same way.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AudioClassificationPipeline.__call__.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AudioClassificationPipeline.__call__.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to None) &#x2014; The number of top labels that will be returned by the pipeline. 
If the provided number is <code>None</code> or higher than the number of labels available in the model configuration, it will default to the number of labels.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.AudioClassificationPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list of <code>dict</code> with the following keys</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <ul> <li><strong>label</strong> (<code>str</code>) — The label predicted.</li> <li><strong>score</strong> (<code>float</code>) — The corresponding probability.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Classify the sequence(s) given as inputs. See the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline">AutomaticSpeechRecognitionPipeline</a> documentation for more information.</p></div></div> <h3 class="relative group"><a id="transformers.AutomaticSpeechRecognitionPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutomaticSpeechRecognitionPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AutomaticSpeechRecognitionPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">AutomaticSpeechRecognitionPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.AutomaticSpeechRecognitionPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AutomaticSpeechRecognitionPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/automatic_speech_recognition.py#L69" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature_extractor<span class="opacity-60">: typing.Union[ForwardRef(&#39;SequenceFeatureExtractor&#39;), str]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Pipeline that aims at extracting spoken text contained within some audio.</p> <p>The input can be either a raw waveform or a audio file. 
In case of the audio file, ffmpeg should be installed for to support multiple audio formats</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AutomaticSpeechRecognitionPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.AutomaticSpeechRecognitionPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AutomaticSpeechRecognitionPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/automatic_speech_recognition.py#L127" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Union[numpy.ndarray, bytes, str]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.AutomaticSpeechRecognitionPipeline.__call__.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.__call__.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>np.ndarray</code> or <code>bytes</code> or <code>str</code> or <code>dict</code>) &#x2014; The inputs is either :<ul> <li><code>str</code> that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using <em>ffmpeg</em>. This requires <em>ffmpeg</em> to be installed on the system.</li> <li><code>bytes</code> it is supposed to be the content of an audio file and is interpreted by <em>ffmpeg</em> in the same way.</li> <li>(<code>np.ndarray</code> of shape (n, ) of type <code>np.float32</code> or <code>np.float64</code>) Raw audio at the correct sampling rate (no further check will be done)</li> <li><code>dict</code> form can be used to pass raw audio sampled at arbitrary <code>sampling_rate</code> and let this pipeline do the resampling. 
The dict must be in the format <code>{&quot;sampling_rate&quot;: int, &quot;raw&quot;: np.array}</code> with optionally a <code>&quot;stride&quot;: (left: int, right: int)</code> than can ask the pipeline to treat the first <code>left</code> samples and last <code>right</code> samples to be ignored in decoding (but used at inference to provide more context to the model). Only use <code>stride</code> with CTC models.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AutomaticSpeechRecognitionPipeline.__call__.return_timestamps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AutomaticSpeechRecognitionPipeline.__call__.return_timestamps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_timestamps</strong> (<em>optional</em>, <code>str</code>) &#x2014; Only available for pure CTC models. If set to <code>&quot;char&quot;</code>, the pipeline will return <code>timestamps</code> along the text for every character in the text. 
For instance if you get <code>[{&quot;text&quot;: &quot;h&quot;, &quot;timestamps&quot;: (0.5,0.6), {&quot;text&quot;: &quot;i&quot;, &quot;timestamps&quot;: (0.7, .9)}]</code>, then it means the model predicts that the letter &#x201C;h&#x201D; was pronounced after <code>0.5</code> and before <code>0.6</code> seconds. If set to <code>&quot;word&quot;</code>, the pipeline will return <code>timestamps</code> along the text for every word in the text. For instance if you get <code>[{&quot;text&quot;: &quot;hi &quot;, &quot;timestamps&quot;: (0.5,0.9), {&quot;text&quot;: &quot;there&quot;, &quot;timestamps&quot;: (1.0, .1.5)}]</code>, then it means the model predicts that the word &#x201C;hi&#x201D; was pronounces before 0.5 and after 0.9 seconds.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.AutomaticSpeechRecognitionPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A dictionary with the following keys:</p> <ul> <li><strong>text</strong> (<code>str</code> ) — The recognized text.</li> <li><strong>chunks</strong> (<em>optional(, <code>List[Dict]</code>) When using <code>return_timestamps</code>, the <code>chunks</code> will become a list containing all the various text chunks identified by the model, </em>e.g.* <code>[&#123;"text": "hi ", "timestamps": (0.5,0.9), &#123;"text": "there", "timestamps": (1.0, 1.5)&#125;]</code>. The original full text can roughly be recovered by doing <code>"".join(chunk["text"] for chunk in output["chunks"])</code>.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Classify the sequence(s) given as inputs. 
See the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline">AutomaticSpeechRecognitionPipeline</a> documentation for more information.</p></div></div> <h3 class="relative group"><a id="transformers.Conversation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ConversationalPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Conversation"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 
7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Conversation</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Conversation" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Conversation"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L19" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">conversation_id<span class="opacity-60">: UUID = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_user_inputs<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">generated_responses<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <em>optional</em>) &#x2014; The initial user input to start the conversation. If not provided, a user input needs to be provided manually using the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation.add_user_input">add_user_input()</a> method before the conversation can begin.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.conversation_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.conversation_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conversation_id</strong> (<code>uuid.UUID</code>, <em>optional</em>) &#x2014; Unique identifier for the conversation. 
If not provided, a random UUID4 id will be assigned to the conversation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.past_user_inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.past_user_inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_user_inputs</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Eventual past history of the conversation of the user. 
You don&#x2019;t need to pass it manually if you use the pipeline interactively but if you want to recreate history you need to set both <code>past_user_inputs</code> and <code>generated_responses</code> with equal length lists of strings<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.generated_responses" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.generated_responses"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>generated_responses</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; Eventual past history of the conversation of the model. You don&#x2019;t need to pass it manually if you use the pipeline interactively but if you want to recreate history you need to set both <code>past_user_inputs</code> and <code>generated_responses</code> with equal length lists of strings<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Utility class containing a conversation and its history. 
This class is meant to be used as an input to the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ConversationalPipeline">ConversationalPipeline</a>. The conversation contains a number of utility function to manage the addition of new user input and generated model responses. A conversation needs to contain an unprocessed user input before being passed to the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ConversationalPipeline">ConversationalPipeline</a>. This user input is either created when the class is instantiated, or by calling <code>conversational_pipeline.append_response(&quot;input&quot;)</code> after a conversation turn.</p> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->conversation = Conversation(<span class="hljs-string">&quot;Going to the movies tonight 
- any suggestions?&quot;</span>) <span class="hljs-comment"># Steps usually performed by the model when generating a response:</span> <span class="hljs-comment"># 1. Mark the user input as processed (moved to the history)</span> conversation.mark_processed() <span class="hljs-comment"># 2. Append a mode response</span> conversation.append_response(<span class="hljs-string">&quot;The Big lebowski.&quot;</span>) conversation.add_user_input(<span class="hljs-string">&quot;Is it good?&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Conversation.add_user_input"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>add_user_input</span></h4><!-- HTML_TAG_END --> <a id="transformers.Conversation.add_user_input" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Conversation.add_user_input"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L83" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 
!mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.add_user_input.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.add_user_input.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>) &#x2014; The user input for the next conversation round.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.add_user_input.overwrite" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.add_user_input.overwrite"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not existing and unprocessed user input should be overwritten when this function is called.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Add a user input to the conversation for the next round. This populates the internal <code>new_user_input</code> field.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Conversation.append_response"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 
16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>append_response</span></h4><!-- HTML_TAG_END --> <a id="transformers.Conversation.append_response" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Conversation.append_response"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L116" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">response<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Conversation.append_response.response" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conversation.append_response.response"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>response</strong> (<code>str</code>) &#x2014; The model generated response.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Append a response to the list of generated responses.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Conversation.iter_texts"><!-- 
HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>iter_texts</span></h4><!-- HTML_TAG_END --> <a id="transformers.Conversation.iter_texts" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Conversation.iter_texts"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L125" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Iterates over all blobs of the conversation.</p> <p>Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. <code>is_user</code> is a <code>bool</code>, <code>text_chunks</code> is a <code>str</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Conversation.mark_processed"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 
12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>mark_processed</span></h4><!-- HTML_TAG_END --> <a id="transformers.Conversation.mark_processed" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Conversation.mark_processed"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L107" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> 
</div></div> <p>Mark the conversation as processed (moves the content of <code>new_user_input</code> to <code>past_user_inputs</code>) and empties the <code>new_user_input</code> field.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConversationalPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConversationalPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConversationalPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConversationalPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L164" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 
0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied 
pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.min_length_for_response" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.min_length_for_response"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_length_for_response</strong> (<code>int</code>, <em>optional</em>, defaults to 32) &#x2014; The minimum length (in number of tokens) for a response.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.minimum_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.minimum_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>minimum_tokens</strong> (<code>int</code>, <em>optional</em>, defaults to 10) &#x2014; The minimum length of tokens to leave for a response.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Multi-turn conversational pipeline.</p> <p>This conversational pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;conversational&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task, currently: <em>‘microsoft/DialoGPT-small’</em>, <em>‘microsoft/DialoGPT-medium’</em>, <em>‘microsoft/DialoGPT-large’</em>. See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=conversational" rel="nofollow">huggingface.co/models</a>.</p> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 
translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->conversational_pipeline = pipeline(<span class="hljs-string">&quot;conversational&quot;</span>) conversation_1 = Conversation(<span class="hljs-string">&quot;Going to the movies tonight - any suggestions?&quot;</span>) conversation_2 = Conversation(<span class="hljs-string">&quot;What&#x27;s the last book you have read?&quot;</span>) conversational_pipeline([conversation_1, conversation_2]) conversation_1.add_user_input(<span class="hljs-string">&quot;Is it an action movie?&quot;</span>) conversation_2.add_user_input(<span class="hljs-string">&quot;What is the genre of this book?&quot;</span>) conversational_pipeline([conversation_1, conversation_2])<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConversationalPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 
19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConversationalPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConversationalPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/conversational.py#L219" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">conversations<span class="opacity-60">: typing.Union[transformers.pipelines.conversational.Conversation, typing.List[transformers.pipelines.conversational.Conversation]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_workers<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation" >Conversation</a> or a list of <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation" >Conversation</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.__call__.conversations" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.__call__.conversations"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>conversations</strong> (a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation">Conversation</a> or a list of <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation">Conversation</a>) &#x2014; Conversations to generate responses for.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConversationalPipeline.__call__.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConversationalPipeline.__call__.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output. generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ConversationalPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation" >Conversation</a> or a list of <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Conversation" >Conversation</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Conversation(s) with updated generated responses for those containing a new user input.</p> <!-- HTML_TAG_END --></p></div></div> <p>Generate responses for the conversation(s) given as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.FeatureExtractionPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FeatureExtractionPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FeatureExtractionPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FeatureExtractionPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FeatureExtractionPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FeatureExtractionPipeline"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/feature_extraction.py#L7" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Union[ForwardRef(&#39;PreTrainedModel&#39;), ForwardRef(&#39;TFPreTrainedModel&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: typing.Optional[transformers.tokenization_utils.PreTrainedTokenizer] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature_extractor<span class="opacity-60">: typing.Optional[ForwardRef(&#39;SequenceFeatureExtractor&#39;)] = None</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">modelcard<span class="opacity-60">: typing.Optional[transformers.modelcard.ModelCard] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60">: ArgumentHandler = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">binary_output<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.FeatureExtractionPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base transformer, which can be used as features in downstream tasks.</p> <p>This feature extraction pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the task identifier: <code>&quot;feature-extraction&quot;</code>.</p> <p>All models may be used for this pipeline. See a list of all models, including community-contributed models on <a href="https://huggingface.co/models" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FeatureExtractionPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path 
fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FeatureExtractionPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FeatureExtractionPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/feature_extraction.py#L69" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono 
text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A nested list of <code>float</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FeatureExtractionPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FeatureExtractionPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of texts) to get the features of.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FeatureExtractionPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A nested list of <code>float</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The features computed by the model.</p> <!-- HTML_TAG_END --></p></div></div> <p>Extract the features of the input(s).</p></div></div> <h3 class="relative group"><a id="transformers.FillMaskPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FillMaskPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r 
rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FillMaskPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FillMaskPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FillMaskPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FillMaskPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/fill_mask.py#L33" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Union[ForwardRef(&#39;PreTrainedModel&#39;), ForwardRef(&#39;TFPreTrainedModel&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: typing.Optional[transformers.tokenization_utils.PreTrainedTokenizer] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature_extractor<span class="opacity-60">: typing.Optional[ForwardRef(&#39;SequenceFeatureExtractor&#39;)] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">modelcard<span class="opacity-60">: typing.Optional[transformers.modelcard.ModelCard] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: 
str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60">: ArgumentHandler = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">binary_output<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.framework" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline 
parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, defaults to 5) &#x2014; The number of predictions to return.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.targets" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.targets"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>targets</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Masked language modeling prediction pipeline using any <code>ModelWithLMHead</code>. See the <a href="../task_summary#masked-language-modeling">masked language modeling examples</a> for more information.</p> <p>This mask filling pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;fill-mask&quot;</code>.</p> <p>The models that this pipeline can use are models that have been trained with a masked language modeling objective, which includes the bi-directional models in the library. See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=fill-mask" rel="nofollow">huggingface.co/models</a>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This pipeline only works for inputs with exactly one token masked. Experimental: We added support for multiple masks. 
The returned values are raw model output, and correspond to disjoint probabilities where one might expect joint probabilities (See <a href="https://github.com/huggingface/transformers/pull/10222" rel="nofollow">discussion</a>).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FillMaskPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FillMaskPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FillMaskPipeline.__call__"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/fill_mask.py#L204" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold 
!mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of prompts) with masked tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.__call__.targets" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.__call__.targets"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>targets</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014; When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FillMaskPipeline.__call__.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FillMaskPipeline.__call__.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>) &#x2014; When passed, overrides the number of predictions to return.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FillMaskPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as list of dictionaries with the following keys:</p> <ul> <li><strong>sequence</strong> (<code>str</code>) — The corresponding input with the mask token prediction.</li> <li><strong>score</strong> (<code>float</code>) — The corresponding probability.</li> <li><strong>token</strong> (<code>int</code>) — The predicted token id (to replace the masked one).</li> <li><strong>token</strong> (<code>str</code>) — The predicted token (to replace the masked one).</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Fill the masked token in the text(s) given as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.ImageClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 
67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageClassificationPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ImageClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ImageClassificationPipeline" 
class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/image_classification.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.ImageClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, 
<em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Image classification pipeline using any <code>AutoModelForImageClassification</code>. 
This pipeline predicts the class of an image.</p> <p>This image classification pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;image-classification&quot;</code>.</p> <p>See the list of available models on <a href="https://huggingface.co/models?filter=image-classification" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageClassificationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageClassificationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageClassificationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/image_classification.py#L58" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">images<span class="opacity-60">: typing.Union[str, typing.List[str], ForwardRef(&#39;Image.Image&#39;), typing.List[ForwardRef(&#39;Image.Image&#39;)]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center 
font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a http link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images, which must then be passed as a string. 
Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageClassificationPipeline.__call__.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageClassificationPipeline.__call__.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 5) &#x2014; The number of top labels that will be returned by the pipeline. 
If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Assign labels to the image(s) passed as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.ImageSegmentationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ImageSegmentationPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageSegmentationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ImageSegmentationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ImageSegmentationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageSegmentationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/image_segmentation.py#L34" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a 
href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by 
the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.framework"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, 
<em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Image segmentation pipeline using any <code>AutoModelForXXXSegmentation</code>. 
This pipeline predicts masks of objects and their classes.</p> <p>This image segmentation pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;image-segmentation&quot;</code>.</p> <p>See the list of available models on <a href="https://huggingface.co/models?filter=image-segmentation" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ImageSegmentationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ImageSegmentationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ImageSegmentationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/image_segmentation.py#L69" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing an HTTP(S) link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. 
Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.__call__.threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.__call__.threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The probability necessary to make a prediction.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ImageSegmentationPipeline.__call__.mask_threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ImageSegmentationPipeline.__call__.mask_threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; Threshold to use when turning the predicted masks into binary values.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Perform segmentation (detect masks &amp; classes) in the image(s) passed as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.TokenClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 
56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>NerPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TokenClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L86" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60"> = &lt;transformers.pipelines.token_classification.TokenClassificationArgumentHandler object at 0x7f45b52f37c0&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.model" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, 
<em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.ignore_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.ignore_labels"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_labels</strong> (<code>List[str]</code>, defaults to <code>[&quot;O&quot;]</code>) &#x2014; A list of labels to ignore.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.grouped_entities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.grouped_entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>grouped_entities</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; DEPRECATED, use <code>aggregation_strategy</code> instead. Whether or not to group the tokens corresponding to the same entity together in the predictions or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.aggregation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.aggregation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>aggregation_strategy</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;none&quot;</code>) &#x2014; The strategy to fuse (or not) tokens based on the model prediction.</p> <ul> <li>&#x201C;none&#x201D; : Will simply not do any aggregation and simply return raw results from the model</li> 
<li>&#x201C;simple&#x201D; : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C, I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{&#x201C;word&#x201D;: ABC, &#x201C;entity&#x201D;: &#x201C;TAG&#x201D;}, {&#x201C;word&#x201D;: &#x201C;D&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}, {&#x201C;word&#x201D;: &#x201C;E&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}] Notice that two consecutive B tags will end up as different entities. On word based languages, we might end up splitting words undesirably : Imagine Microsoft being tagged as [{&#x201C;word&#x201D;: &#x201C;Micro&#x201D;, &#x201C;entity&#x201D;: &#x201C;ENTERPRISE&#x201D;}, {&#x201C;word&#x201D;: &#x201C;soft&#x201D;, &#x201C;entity&#x201D;: &#x201C;NAME&#x201D;}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages that support that meaning, which is basically tokens separated by a space). These mitigations will only work on real words, &#x201C;New york&#x201D; might still be tagged with two different entities.</li> <li>&#x201C;first&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. Words will simply use the tag of the first token of the word when there is ambiguity.</li> <li>&#x201C;average&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. scores will be averaged first across tokens, and then the maximum label is applied.</li> <li>&#x201C;max&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. Word entity will simply be the token with the maximum score.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Named Entity Recognition pipeline using any <code>ModelForTokenClassification</code>. 
See the <a href="../task_summary#named-entity-recognition">named entity recognition examples</a> for more information.</p> <p>This token recognition pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;ner&quot;</code> (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous).</p> <p>The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=token-classification" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.aggregate_words"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 
7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>aggregate_words</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.aggregate_words" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.aggregate_words"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L366" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span class="opacity-60">: typing.List[dict]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">aggregation_strategy<span class="opacity-60">: AggregationStrategy</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Override tokens from a given word that disagree to force agreement on word boundaries.</p> <p>Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.gather_pre_entities"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>gather_pre_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.gather_pre_entities" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.gather_pre_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L252" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sentence<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: 
ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">offset_mapping<span class="opacity-60">: typing.Union[typing.List[typing.Tuple[int, int]], NoneType]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_mask<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">aggregation_strategy<span class="opacity-60">: AggregationStrategy</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Fuse various numpy arrays into dicts with all the information needed for aggregation</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.group_entities"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 
21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>group_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.group_entities" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.group_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L428" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span 
class="opacity-60">: typing.List[dict]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.group_entities.entities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.group_entities.entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Find and group together the adjacent tokens with the same entity predicted.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.group_sub_entities"><!-- HTML_TAG_START 
--><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>group_sub_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.group_sub_entities" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.group_sub_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L393" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span class="opacity-60">: typing.List[dict]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.group_sub_entities.entities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.group_sub_entities.entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 
40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Group together the adjacent tokens with the same entity predicted.</p></div></div> <p>See <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.TokenClassificationPipeline">TokenClassificationPipeline</a> for all details.</p> <h3 class="relative group"><a id="transformers.ObjectDetectionPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ObjectDetectionPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 
items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ObjectDetectionPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ObjectDetectionPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ObjectDetectionPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ObjectDetectionPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/object_detection.py#L25" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model 
card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 
0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing 
supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Object detection pipeline using any <code>AutoModelForObjectDetection</code>. 
This pipeline predicts bounding boxes of objects and their classes.</p> <p>This object detection pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;object-detection&quot;</code>.</p> <p>See the list of available models on <a href="https://huggingface.co/models?filter=object-detection" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ObjectDetectionPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ObjectDetectionPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ObjectDetectionPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/object_detection.py#L51" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing an HTTP(S) link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul> <p>The pipeline accepts either a single image or a batch of images. 
Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ObjectDetectionPipeline.__call__.threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ObjectDetectionPipeline.__call__.threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>threshold</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The probability necessary to make a prediction.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Detect objects (bounding boxes &amp; classes) in the image(s) passed as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.QuestionAnsweringPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>QuestionAnsweringPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.QuestionAnsweringPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span 
class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">QuestionAnsweringPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.QuestionAnsweringPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.QuestionAnsweringPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L102" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Union[ForwardRef(&#39;PreTrainedModel&#39;), ForwardRef(&#39;TFPreTrainedModel&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizer</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">modelcard<span class="opacity-60">: typing.Optional[transformers.modelcard.ModelCard] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; 
Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, 
<em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Question Answering pipeline using any <code>ModelForQuestionAnswering</code>. 
See the <a href="../task_summary#question-answering">question answering examples</a> for more information.</p> <p>This question answering pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;question-answering&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=question-answering" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.QuestionAnsweringPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 
9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.QuestionAnsweringPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.QuestionAnsweringPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L205" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded 
hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A <code>dict</code> or a list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>SquadExample</code>or a list of <code>SquadExample</code> &#x2014; One or several <code>SquadExample</code>containing the question and context.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.X" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.X"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>X</strong> (<code>SquadExample</code>or a list of <code>SquadExample</code> <em>optional</em>) &#x2014; One or several <code>SquadExample</code>containing the question and context (will be treated the same way as if passed as the first positional argument).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.data" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>SquadExample</code>or a list of <code>SquadExample</code> <em>optional</em>) &#x2014; One or several <code>SquadExample</code>containing the question and context (will be treated the same way as if passed as the first positional argument).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.question" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.question"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>question</strong> (<code>str</code> 
or <code>List[str]</code>) &#x2014; One or several question(s) (must be used in conjunction with the <code>context</code> argument).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.context" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.context"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>context</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several context(s) associated with the question(s) (must be used in conjunction with the <code>question</code> argument).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.topk" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.topk"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>topk</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of answers to return (will be chosen by order of likelihood). Note that we return less than topk answers if there are not enough options available within the context.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.doc_stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.doc_stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>doc_stride</strong> (<code>int</code>, <em>optional</em>, defaults to 128) &#x2014; If the context is too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.max_answer_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.max_answer_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_answer_len</strong> (<code>int</code>, <em>optional</em>, defaults to 15) &#x2014; The maximum length of predicted answers (e.g., only answers with a shorter length are considered).<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.max_seq_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.max_seq_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_seq_len</strong> (<code>int</code>, <em>optional</em>, defaults to 384) &#x2014; The maximum length of the total sentence (context + question) after tokenization. 
The context will be split in several chunks (using <code>doc_stride</code>) if needed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.max_question_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.max_question_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_question_len</strong> (<code>int</code>, <em>optional</em>, defaults to 64) &#x2014; The maximum length of the question after tokenization. 
It will be truncated if needed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.__call__.handle_impossible_answer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.__call__.handle_impossible_answer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>handle_impossible_answer</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not we accept impossible as an answer.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.QuestionAnsweringPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A <code>dict</code> or a list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> 
<ul> <li><strong>score</strong> (<code>float</code>) — The probability associated to the answer.</li> <li><strong>start</strong> (<code>int</code>) — The character start index of the answer (in the tokenized version of the input).</li> <li><strong>end</strong> (<code>int</code>) — The character end index of the answer (in the tokenized version of the input).</li> <li><strong>answer</strong> (<code>str</code>) — The answer to the question.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Answer the question(s) given as inputs by using the context(s).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.QuestionAnsweringPipeline.create_sample"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_sample</span></h4><!-- HTML_TAG_END --> <a id="transformers.QuestionAnsweringPipeline.create_sample" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.QuestionAnsweringPipeline.create_sample"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L143" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">question<span class="opacity-60">: typing.Union[str, typing.List[str]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">context<span class="opacity-60">: typing.Union[str, typing.List[str]]</span></span> </span> <span>)</span> </p> <div 
class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.create_sample.question" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.create_sample.question"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>question</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The question(s) asked.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.create_sample.context" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.create_sample.context"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>context</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The context(s) in which we will look for the answer.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>QuestionAnsweringPipeline leverages the <code>SquadExample</code>internally. 
This helper method encapsulate all the logic for converting question(s) and context(s) to <code>SquadExample</code></p> <p>We currently support extractive question answering.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.QuestionAnsweringPipeline.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.QuestionAnsweringPipeline.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.QuestionAnsweringPipeline.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L481" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">topk<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_answer_len<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">undesired_tokens<span class="opacity-60">: ndarray</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.decode.start" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.decode.start"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start</strong> (<code>np.ndarray</code>) &#x2014; Individual start probabilities for each token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.decode.end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.QuestionAnsweringPipeline.decode.end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end</strong> (<code>np.ndarray</code>) &#x2014; Individual end probabilities for each token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.decode.topk" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.decode.topk"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>topk</strong> (<code>int</code>) &#x2014; Indicates how many possible answer span(s) to extract from the model output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.decode.max_answer_len" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.decode.max_answer_len"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_answer_len</strong> (<code>int</code>) &#x2014; Maximum size of the answer to extract from the model&#x2019;s output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.decode.undesired_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.decode.undesired_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>undesired_tokens</strong> (<code>np.ndarray</code>) &#x2014; Mask determining tokens that can be part of the answer<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Take the output of any <code>ModelForQuestionAnswering</code> and will generate probabilities for each span to be the actual answer.</p> <p>In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or answer end position being before the starting position. 
The method supports output the k-best answer through the topk argument.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.QuestionAnsweringPipeline.span_to_answer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>span_to_answer</span></h4><!-- HTML_TAG_END --> <a id="transformers.QuestionAnsweringPipeline.span_to_answer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.QuestionAnsweringPipeline.span_to_answer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L530" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end<span class="opacity-60">: int</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>Dictionary like `{‘answer’</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.span_to_answer.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.span_to_answer.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>) &#x2014; The actual context to extract the answer from.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.span_to_answer.start" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.span_to_answer.start"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start</strong> (<code>int</code>) &#x2014; The answer starting token index.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.QuestionAnsweringPipeline.span_to_answer.end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.QuestionAnsweringPipeline.span_to_answer.end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end</strong> (<code>int</code>) &#x2014; The answer end token index.<!-- HTML_TAG_END --> </span></span> 
</li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.QuestionAnsweringPipeline.span_to_answer.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>Dictionary like `{‘answer’</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>str, ‘start’: int, ‘end’: int}`</p> <!-- HTML_TAG_END --></p></div></div> <p>When decoding from token probabilities, this method maps token indexes to actual word in the initial context.</p></div></div> <h3 class="relative group"><a id="transformers.SummarizationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SummarizationPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SummarizationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r 
px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SummarizationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SummarizationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SummarizationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L186" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.framework" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied 
pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Summarize news articles and other documents.</p> <p>This summarizing pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;summarization&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is 
currently, ’<em>bart-large-cnn</em>’, ’<em>t5-small</em>’, ’<em>t5-base</em>’, ’<em>t5-large</em>’, ’<em>t5-3b</em>’, ’<em>t5-11b</em>’. See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=summarization" rel="nofollow">huggingface.co/models</a>.</p> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># use bart in pytorch</span> summarizer = pipeline(<span class="hljs-string">&quot;summarization&quot;</span>) summarizer(<span class="hljs-string">&quot;An apple a day, keeps the doctor away&quot;</span>, min_length=<span class="hljs-number">5</span>, max_length=<span class="hljs-number">20</span>) <span class="hljs-comment"># use t5 in tf</span> summarizer = pipeline(<span class="hljs-string">&quot;summarization&quot;</span>, model=<span 
class="hljs-string">&quot;t5-base&quot;</span>, tokenizer=<span class="hljs-string">&quot;t5-base&quot;</span>, framework=<span class="hljs-string">&quot;tf&quot;</span>) summarizer(<span class="hljs-string">&quot;An apple a day, keeps the doctor away&quot;</span>, min_length=<span class="hljs-number">5</span>, max_length=<span class="hljs-number">20</span>)<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SummarizationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.SummarizationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SummarizationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L212" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center 
font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.__call__.documents" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.__call__.documents"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>documents</strong> (<em>str</em> or <code>List[str]</code>) &#x2014; One or several articles (or one list of articles) to summarize.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.__call__.return_text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.__call__.return_text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SummarizationPipeline.__call__.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SummarizationPipeline.__call__.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output. 
generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.SummarizationPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>summary_text</strong> (<code>str</code>, present when <code>return_text=True</code>) — The summary of the corresponding input.</li> <li><strong>summary_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) — The token ids of the summary.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Summarize the text(s) given as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.TableQuestionAnsweringPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TableQuestionAnsweringPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TableQuestionAnsweringPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TableQuestionAnsweringPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TableQuestionAnsweringPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TableQuestionAnsweringPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/table_question_answering.py#L83" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60"> = &lt;transformers.pipelines.table_question_answering.TableQuestionAnsweringArgumentHandler object at 0x7f45b52d1cd0&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base 
!pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.framework"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a 
href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Table Question Answering pipeline using a <code>ModelForTableQuestionAnswering</code>. 
This pipeline is only available in PyTorch.</p> <p>This tabular question answering pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;table-question-answering&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task. See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=table-question-answering" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TableQuestionAnsweringPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 
12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TableQuestionAnsweringPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TableQuestionAnsweringPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/table_question_answering.py#L244" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 
cursor-pointer"><!-- HTML_TAG_START --><span>A dictionary or a list of dictionaries containing results</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.__call__.table" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.__call__.table"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>table</strong> (<code>pd.DataFrame</code> or <code>Dict</code>) &#x2014; Pandas DataFrame or dictionary that will be converted to a DataFrame containing all the table values. 
See above for an example of dictionary.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.__call__.query" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.__call__.query"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>query</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Query or list of queries that will be sent to the model alongside the table.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.__call__.sequential" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.__call__.sequential"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequential</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the inference to be done sequentially to extract relations within sequences, given their conversational nature.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.__call__.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TableQuestionAnsweringPipeline.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TableQuestionAnsweringPipeline.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <code>TapasTruncationStrategy</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;drop_rows_to_fit&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate row by row, removing rows from the table.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TableQuestionAnsweringPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A dictionary or a list of dictionaries containing results</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result is a dictionary with the following keys:</p> <ul> <li><strong>answer</strong> (<code>str</code>) — The answer of the query given the table. 
If there is an aggregator, the answer will be preceded by <code>AGGREGATOR &gt;</code>.</li> <li><strong>coordinates</strong> (<code>List[Tuple[int, int]]</code>) — Coordinates of the cells of the answers.</li> <li><strong>cells</strong> (<code>List[str]</code>) — List of strings made up of the answer cell values.</li> <li><strong>aggregator</strong> (<code>str</code>) — If the model has an aggregator, this returns the aggregator.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Answers queries according to a table. The pipeline accepts several types of inputs which are detailed below:</p> <ul><li><code>pipeline(table, query)</code></li> <li><code>pipeline(table, [query])</code></li> <li><code>pipeline(table=table, query=query)</code></li> <li><code>pipeline(table=table, query=[query])</code></li> <li><code>pipeline({&quot;table&quot;: table, &quot;query&quot;: query})</code></li> <li><code>pipeline({&quot;table&quot;: table, &quot;query&quot;: [query]})</code></li> <li><code>pipeline([{&quot;table&quot;: table, &quot;query&quot;: query}, {&quot;table&quot;: table, &quot;query&quot;: query}])</code></li></ul> <p>The <code>table</code> argument should be a dict or a DataFrame built from that dict, containing the whole table:</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" 
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->data = { <span class="hljs-string">&quot;actors&quot;</span>: [<span class="hljs-string">&quot;brad pitt&quot;</span>, <span class="hljs-string">&quot;leonardo di caprio&quot;</span>, <span class="hljs-string">&quot;george clooney&quot;</span>], <span class="hljs-string">&quot;age&quot;</span>: [<span class="hljs-string">&quot;56&quot;</span>, <span class="hljs-string">&quot;45&quot;</span>, <span class="hljs-string">&quot;59&quot;</span>], <span class="hljs-string">&quot;number of movies&quot;</span>: [<span class="hljs-string">&quot;87&quot;</span>, <span class="hljs-string">&quot;53&quot;</span>, <span class="hljs-string">&quot;69&quot;</span>], <span class="hljs-string">&quot;date of birth&quot;</span>: [<span class="hljs-string">&quot;7 february 1967&quot;</span>, <span class="hljs-string">&quot;10 june 1996&quot;</span>, <span class="hljs-string">&quot;28 november 1967&quot;</span>], }<!-- HTML_TAG_END --></pre></div> <p>This dictionary can be passed in as such, or can be converted to a pandas DataFrame:</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 
32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> pandas <span class="hljs-keyword">as</span> pd table = pd.DataFrame.from_dict(data)<!-- HTML_TAG_END --></pre></div></div></div> <h3 class="relative group"><a id="transformers.TextClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TextClassificationPipeline </span></h3> 
<div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TextClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TextClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TextClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TextClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text_classification.py#L47" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> 
</li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, 
<em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.return_all_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.return_all_scores"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_all_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to return all prediction scores or just the one of the predicted class.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.function_to_apply" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.function_to_apply"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>function_to_apply</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;default&quot;</code>) &#x2014; The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:</p> <ul> <li><code>&quot;default&quot;</code>: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output.</li> <li><code>&quot;sigmoid&quot;</code>: Applies the sigmoid function on the output.</li> <li><code>&quot;softmax&quot;</code>: Applies the softmax function on the output.</li> <li><code>&quot;none&quot;</code>: Does not apply any function on the output.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Text classification pipeline using any <code>ModelForSequenceClassification</code>. See the <a href="../task_summary#sequence-classification">sequence classification examples</a> for more information.</p> <p>This text classification pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;sentiment-analysis&quot;</code> (for classifying sequences according to positive or negative sentiments).</p> <p>If multiple classification labels are available (<code>model.config.num_labels &gt;= 2</code>), the pipeline will run a softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. 
See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=text-classification" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TextClassificationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TextClassificationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TextClassificationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text_classification.py#L92" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.TextClassificationPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of prompts) to classify.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.__call__.return_all_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.__call__.return_all_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_all_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to return scores for all labels.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextClassificationPipeline.__call__.function_to_apply" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextClassificationPipeline.__call__.function_to_apply"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>function_to_apply</strong> (<code>str</code>, <em>optional</em>, defaults 
to <code>&quot;default&quot;</code>) &#x2014; The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:</p> <p>If this argument is not specified, then it will apply the following functions according to the number of labels:</p> <ul> <li>If the model has a single label, will apply the sigmoid function on the output.</li> <li>If the model has several labels, will apply the softmax function on the output.</li> </ul> <p>Possible values are:</p> <ul> <li><code>&quot;sigmoid&quot;</code>: Applies the sigmoid function on the output.</li> <li><code>&quot;softmax&quot;</code>: Applies the softmax function on the output.</li> <li><code>&quot;none&quot;</code>: Does not apply any function on the output.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TextClassificationPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as list of dictionaries with the following keys:</p> <ul> <li><strong>label</strong> (<code>str</code>) — The label predicted.</li> <li><strong>score</strong> (<code>float</code>) — The corresponding probability.</li> </ul> <p>If <code>self.return_all_scores=True</code>, one such dictionary is returned per label.</p> <!-- HTML_TAG_END --></p></div></div> <p>Classify the text(s) given as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.TextGenerationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TextGenerationPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TextGenerationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TextGenerationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TextGenerationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TextGenerationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text_generation.py#L20" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold 
!mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 
0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied 
pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Language generation pipeline using any <code>ModelWithLMHead</code>. 
This pipeline predicts the words that will follow a specified text prompt.</p> <p>This language generation pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;text-generation&quot;</code>.</p> <p>The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective, which includes the uni-directional models in the library (e.g. gpt2). See the list of available models on <a href="https://huggingface.co/models?filter=text-generation" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TextGenerationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 
7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TextGenerationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TextGenerationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text_generation.py#L136" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_inputs<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span 
class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several prompts (or one list of prompts) to complete.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.return_text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.return_text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 
40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.return_full_text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.return_full_text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_full_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>False</code> only added text is returned, 
otherwise the full text is returned Only meaningful if <em>return_text</em> is set to True.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.prefix"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prefix</strong> (<code>str</code>, <em>optional</em>) &#x2014; Prefix added to prompt.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TextGenerationPipeline.__call__.handle_long_generation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TextGenerationPipeline.__call__.handle_long_generation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>handle_long_generation</strong> (<code>str</code>, <em>optional</em>) &#x2014; By default, this pipelines does not handle long generation (ones that exceed in one form or the other the model maximum length). There is no perfect way to adress this (more info :<a href="https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227" rel="nofollow">https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227</a>). This provides common strategies to work around that problem depending on your use case.</p> <ul> <li><code>None</code> : default strategy where nothing in particular happens</li> <li><code>&quot;hole&quot;</code>: Truncates left of input, and leaves a gap wide enough to let generation happen (might truncate a lot of the prompt and not suitable when generation exceed the model capacity)</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TextGenerationPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>generated_text</strong> (<code>str</code>, present when <code>return_text=True</code>) — The generated text.</li> <li><strong>generated_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) — The token ids of the generated text.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Complete the prompt(s) given as inputs.</p></div></div> <h3 class="relative group"><a 
id="transformers.Text2TextGenerationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Text2TextGenerationPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Text2TextGenerationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 
2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Text2TextGenerationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Text2TextGenerationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Text2TextGenerationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L26" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> 
</span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, 
<em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Pipeline for text to text generation using seq2seq models.</p> <p>This Text2TextGenerationPipeline pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;text2text-generation&quot;</code>.</p> <p>The models that this pipeline can use are models that have 
been fine-tuned on a translation task. See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=text2text-generation" rel="nofollow">huggingface.co/models</a>.</p> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->text2text_generator = pipeline(<span class="hljs-string">&quot;text2text-generation&quot;</span>) text2text_generator(<span class="hljs-string">&quot;question: What is 42 ? 
context: 42 is the answer to life, the universe and everything&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Text2TextGenerationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.Text2TextGenerationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Text2TextGenerationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L109" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.Text2TextGenerationPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Input text for the encoder.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of predictions (as token indices) in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.__call__.return_text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.__call__.return_text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) 
&#x2014; Whether or not to include the decoded texts in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.__call__.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.__call__.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Text2TextGenerationPipeline.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Text2TextGenerationPipeline.__call__.truncation"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>TruncationStrategy</code>, <em>optional</em>, defaults to <code>TruncationStrategy.DO_NOT_TRUNCATE</code>) &#x2014; The truncation strategy for the tokenization within the pipeline. <code>TruncationStrategy.DO_NOT_TRUNCATE</code> (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model&#x2019;s max_length instead of throwing an error down the line. 
generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Text2TextGenerationPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>generated_text</strong> (<code>str</code>, present when <code>return_text=True</code>) — The generated text.</li> <li><strong>generated_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) — The token ids of the generated text.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Generate the output text(s) using text(s) given as inputs.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Text2TextGenerationPipeline.check_inputs"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" 
fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>check_inputs</span></h4><!-- HTML_TAG_END --> <a id="transformers.Text2TextGenerationPipeline.check_inputs" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Text2TextGenerationPipeline.check_inputs"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L82" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Checks whether there might be something wrong with given input with regard to the model.</p></div></div> <h3 class="relative group"><a id="transformers.TokenClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span>TokenClassificationPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TokenClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L86" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60"> = &lt;transformers.pipelines.token_classification.TokenClassificationArgumentHandler object at 0x7f45b52f37c0&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.TokenClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, 
<em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.ignore_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.ignore_labels"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_labels</strong> (<code>List[str]</code>, defaults to <code>[&quot;O&quot;]</code>) &#x2014; A list of labels to ignore.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.grouped_entities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.grouped_entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>grouped_entities</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; DEPRECATED, use <code>aggregation_strategy</code> instead. Whether or not to group the tokens corresponding to the same entity together in the predictions or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.aggregation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.aggregation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>aggregation_strategy</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;none&quot;</code>) &#x2014; The strategy to fuse (or not) tokens based on the model prediction.</p> <ul> <li>&#x201C;none&#x201D; : Will simply not do any aggregation and simply return raw results from the model</li> 
<li>&#x201C;simple&#x201D; : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C, I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{&#x201C;word&#x201D;: ABC, &#x201C;entity&#x201D;: &#x201C;TAG&#x201D;}, {&#x201C;word&#x201D;: &#x201C;D&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}, {&#x201C;word&#x201D;: &#x201C;E&#x201D;, &#x201C;entity&#x201D;: &#x201C;TAG2&#x201D;}] Notice that two consecutive B tags will end up as different entities. On word based languages, we might end up splitting words undesirably : Imagine Microsoft being tagged as [{&#x201C;word&#x201D;: &#x201C;Micro&#x201D;, &#x201C;entity&#x201D;: &#x201C;ENTERPRISE&#x201D;}, {&#x201C;word&#x201D;: &#x201C;soft&#x201D;, &#x201C;entity&#x201D;: &#x201C;NAME&#x201D;}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages that support that meaning, which is basically tokens separated by a space). These mitigations will only work on real words, &#x201C;New york&#x201D; might still be tagged with two different entities.</li> <li>&#x201C;first&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. Words will simply use the tag of the first token of the word when there is ambiguity.</li> <li>&#x201C;average&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. scores will be averaged first across tokens, and then the maximum label is applied.</li> <li>&#x201C;max&#x201D; : (works only on word based models) Will use the <code>SIMPLE</code> strategy except that words, cannot end up with different tags. Word entity will simply be the token with the maximum score.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Named Entity Recognition pipeline using any <code>ModelForTokenClassification</code>. 
See the <a href="../task_summary#named-entity-recognition">named entity recognition examples</a> for more information.</p> <p>This token recognition pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;ner&quot;</code> (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous).</p> <p>The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=token-classification" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 
20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L160" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Union[str, typing.List[str]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.__call__.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.__call__.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several texts (or one list of texts) for token classification.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 
text-gray-800" id="transformers.TokenClassificationPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a list of dictionaries (one for each token in the corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with the following keys:</p> <ul> <li><strong>word</strong> (<code>str</code>) — The token/word classified.</li> <li><strong>score</strong> (<code>float</code>) — The corresponding probability for <code>entity</code>.</li> <li><strong>entity</strong> (<code>str</code>) — The entity predicted for that token/word (it is named <em>entity_group</em> when <em>aggregation_strategy</em> is not <code>"none"</code>.</li> <li><strong>index</strong> (<code>int</code>, only present when <code>aggregation_strategy="none"</code>) — The index of the corresponding token in the sentence.</li> <li><strong>start</strong> (<code>int</code>, <em>optional</em>) — The index of the start of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer</li> <li><strong>end</strong> (<code>int</code>, <em>optional</em>) — The index of the end of the corresponding entity in the sentence. 
Only exists if the offsets are available within the tokenizer</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Classify each token of the text(s) given as inputs.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.aggregate_words"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>aggregate_words</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.aggregate_words" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.aggregate_words"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L366" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span class="opacity-60">: typing.List[dict]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">aggregation_strategy<span class="opacity-60">: AggregationStrategy</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Override tokens from a given word that disagree to force agreement on word boundaries.</p> <p>Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft| company| B-ENT I-ENT</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r 
rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.gather_pre_entities"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>gather_pre_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.gather_pre_entities" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.gather_pre_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L252" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sentence<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">offset_mapping<span class="opacity-60">: typing.Union[typing.List[typing.Tuple[int, int]], NoneType]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_mask<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">aggregation_strategy<span class="opacity-60">: AggregationStrategy</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Fuse various numpy arrays into dicts with all the information needed for aggregation</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.group_entities"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>group_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.group_entities" 
class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.group_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L428" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span class="opacity-60">: typing.List[dict]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.group_entities.entities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.group_entities.entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Find and group together the adjacent tokens with the same entity predicted.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenClassificationPipeline.group_sub_entities"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 
14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>group_sub_entities</span></h4><!-- HTML_TAG_END --> <a id="transformers.TokenClassificationPipeline.group_sub_entities" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenClassificationPipeline.group_sub_entities"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/token_classification.py#L393" target="_blank"><span>&lt;</span> <span 
class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">entities<span class="opacity-60">: typing.List[dict]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenClassificationPipeline.group_sub_entities.entities" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenClassificationPipeline.group_sub_entities.entities"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>entities</strong> (<code>dict</code>) &#x2014; The entities predicted by the pipeline.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> 
<p>Group together the adjacent tokens with the same entity predicted.</p></div></div> <h3 class="relative group"><a id="transformers.TranslationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TranslationPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TranslationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" 
opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TranslationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TranslationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TranslationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L253" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied 
pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Translates from one language to another.</p> <p>This translation pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;translation_xx_to_yy&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on a translation task. 
See the up-to-date list of available models on <a href="https://huggingface.co/models?filter=translation" rel="nofollow">huggingface.co/models</a>.</p> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->en_fr_translator = pipeline(<span class="hljs-string">&quot;translation_en_to_fr&quot;</span>) en_fr_translator(<span class="hljs-string">&quot;How old are you?&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TranslationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" 
height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TranslationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TranslationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/text2text_generation.py#L305" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list or a list of list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; Texts to be translated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to include the tensors of 
predictions (as token indices) in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.return_text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.return_text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_text</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to include the decoded texts in the outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clean up the potential extra spaces in the text output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.src_lang" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.src_lang"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>src_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the input. Might be required for multilingual models. Will not have any effect for single pair translation models<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TranslationPipeline.__call__.tgt_lang" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TranslationPipeline.__call__.tgt_lang"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tgt_lang</strong> (<code>str</code>, <em>optional</em>) &#x2014; The language of the desired output. Might be required for multilingual models. 
Will not have any effect for single pair translation models generate_kwargs &#x2014; Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework <a href="./model#generative-models">here</a>).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TranslationPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list or a list of list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>translation_text</strong> (<code>str</code>, present when <code>return_text=True</code>) — The translation.</li> <li><strong>translation_token_ids</strong> (<code>torch.Tensor</code> or <code>tf.Tensor</code>, present when <code>return_tensors=True</code>) — The token ids of the translation.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Translate the text(s) given as inputs.</p></div></div> <h3 class="relative group"><a id="transformers.ZeroShotClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeroShotClassificationPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ZeroShotClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ZeroShotClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ZeroShotClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ZeroShotClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/zero_shot_classification.py#L47" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60"> = &lt;transformers.pipelines.zero_shot_classification.ZeroShotClassificationArgumentHandler object at 0x7f45b52fe1c0&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.framework"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a 
href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>NLI-based zero-shot classification pipeline using a <code>ModelForSequenceClassification</code> trained on NLI (natural language inference) tasks.</p> <p>Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis pair and passed to the pretrained model. 
Then, the logit for <em>entailment</em> is taken as the logit for the candidate label being valid. Any NLI model can be used, but the id of the <em>entailment</em> label must be included in the model config’s :attr:<em>~transformers.PretrainedConfig.label2id</em>.</p> <p>This NLI pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;zero-shot-classification&quot;</code>.</p> <p>The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list of available models on <a href="https://huggingface.co/models?search=nli" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ZeroShotClassificationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 
7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ZeroShotClassificationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ZeroShotClassificationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/zero_shot_classification.py#L139" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: typing.Union[str, typing.List[str]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A <code>dict</code> or a list of <code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.__call__.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.__call__.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> 
(<code>str</code> or <code>List[str]</code>) &#x2014; The sequence(s) to classify, will be truncated if the model input is too large.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.__call__.candidate_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.__call__.candidate_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>candidate_labels</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; The set of possible class labels to classify each sequence into. 
Can be a single label, a string of comma-separated labels, or a list of labels.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.__call__.hypothesis_template" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.__call__.hypothesis_template"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hypothesis_template</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;This example is {}.&quot;</code>) &#x2014; The template used to turn each label into an NLI-style hypothesis. This template must include a {} or similar syntax for the candidate label to be inserted into the template. For example, the default template is <code>&quot;This example is {}.&quot;</code> With the candidate label <code>&quot;sports&quot;</code>, this would be fed into the model like <code>&quot;&lt;cls&gt; sequence to classify &lt;sep&gt; This example is sports . &lt;sep&gt;&quot;</code>. 
The default template works well in many cases, but it may be worthwhile to experiment with different templates depending on the task setting.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotClassificationPipeline.__call__.multi_label" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotClassificationPipeline.__call__.multi_label"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>multi_label</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not multiple candidate labels can be true. If <code>False</code>, the scores are normalized such that the sum of the label likelihoods for each sequence is 1. If <code>True</code>, the labels are considered independent and probabilities are normalized for each candidate by doing a softmax of the entailment score vs. 
the contradiction score.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ZeroShotClassificationPipeline.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A <code>dict</code> or a list of <code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Each result comes as a dictionary with the following keys:</p> <ul> <li><strong>sequence</strong> (<code>str</code>) — The sequence for which this is the output.</li> <li><strong>labels</strong> (<code>List[str]</code>) — The labels sorted by order of likelihood.</li> <li><strong>scores</strong> (<code>List[float]</code>) — The probabilities for each of the labels.</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Classify the sequence(s) given as inputs. See the <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.ZeroShotClassificationPipeline">ZeroShotClassificationPipeline</a> documentation for more information.</p></div></div> <h3 class="relative group"><a id="transformers.ZeroShotImageClassificationPipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeroShotImageClassificationPipeline </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ZeroShotImageClassificationPipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ZeroShotImageClassificationPipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ZeroShotImageClassificationPipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ZeroShotImageClassificationPipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/zero_shot_image_classification.py#L29" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.model"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. 
This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. 
This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.framework"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.num_workers"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a 
href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Zero shot image classification pipeline using <code>CLIPModel</code>. 
This pipeline predicts the class of an image when you provide an image and a set of <code>candidate_labels</code>.</p> <p>This image classification pipeline can currently be loaded from <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.pipeline">pipeline()</a> using the following task identifier: <code>&quot;zero-shot-image-classification&quot;</code>.</p> <p>See the list of available models on <a href="https://huggingface.co/models?filter=zero-shot-image-classification" rel="nofollow">huggingface.co/models</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ZeroShotImageClassificationPipeline.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ZeroShotImageClassificationPipeline.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ZeroShotImageClassificationPipeline.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/zero_shot_image_classification.py#L48" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">images<span class="opacity-60">: typing.Union[str, typing.List[str], ForwardRef(&#39;Image&#39;), typing.List[ForwardRef(&#39;Image&#39;)]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span 
class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.__call__.images" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.__call__.images"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>images</strong> (<code>str</code>, <code>List[str]</code>, <code>PIL.Image</code> or <code>List[PIL.Image]</code>) &#x2014; The pipeline handles three types of images:</p> <ul> <li>A string containing a http link pointing to an image</li> <li>A string containing a local path to an image</li> <li>An image loaded in PIL directly</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.ZeroShotImageClassificationPipeline.__call__.candidate_labels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.__call__.candidate_labels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>candidate_labels</strong> (<code>List[str]</code>) &#x2014; The candidate labels for this image<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ZeroShotImageClassificationPipeline.__call__.hypothesis_template" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ZeroShotImageClassificationPipeline.__call__.hypothesis_template"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 
8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hypothesis_template</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;This is a photo of {}&quot;</code>) &#x2014; The sentence used in cunjunction with <em>candidate_labels</em> to attempt the image classification by replacing the placeholder with the candidate_labels. Then likelihood is estimated by using logits_per_image<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Assign labels to the image(s) passed as inputs.</p></div></div> <h2 class="relative group"><a id="transformers.Pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>Parent class: <code>Pipeline</code></span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Pipeline"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Pipeline</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Pipeline" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Pipeline"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 
8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L711" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Union[ForwardRef(&#39;PreTrainedModel&#39;), ForwardRef(&#39;TFPreTrainedModel&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: typing.Optional[transformers.tokenization_utils.PreTrainedTokenizer] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature_extractor<span class="opacity-60">: typing.Optional[ForwardRef(&#39;SequenceFeatureExtractor&#39;)] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">modelcard<span class="opacity-60">: typing.Optional[transformers.modelcard.ModelCard] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: typing.Optional[str] = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_parser<span class="opacity-60">: ArgumentHandler = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">binary_output<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>) &#x2014; The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> for PyTorch and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> for TensorFlow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 
0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>) &#x2014; The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.modelcard" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.modelcard"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>modelcard</strong> (<code>str</code> or <code>ModelCard</code> <em>optional</em>) &#x2014; Model card attributed to the model for this pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 
my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>) &#x2014; The framework to use, either <code>&quot;pt&quot;</code> for PyTorch or <code>&quot;tf&quot;</code> for TensorFlow. The specified framework must be installed.</p> <p>If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the <code>model</code>, or to PyTorch if no model is provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>, defaults to <code>&quot;&quot;</code>) &#x2014; A task-identifier for the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; When the pipeline will use <em>DataLoader</em> (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read <a href="https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching" rel="nofollow">Batching with pipelines</a> .<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.args_parser" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.args_parser"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args_parser</strong> (<a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.pipelines.ArgumentHandler">ArgumentHandler</a>, <em>optional</em>) &#x2014; Reference to the object in charge of parsing supplied pipeline parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 
my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Device ordinal for CPU/GPU supports. 
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.binary_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.binary_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>binary_output</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across different pipelines.</p> <p>Base class implementing pipelined operations. 
Pipeline workflow is defined as a sequence of the following operations:</p> <p>Input -&gt; Tokenization -&gt; Model Inference -&gt; Post-Processing (task dependent) -&gt; Output</p> <p>Pipeline supports running on CPU or GPU through the device argument (see below).</p> <p>Some pipeline, like for instance <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.FeatureExtractionPipeline">FeatureExtractionPipeline</a> (<code>&#39;feature-extraction&#39;</code>) output large tensor object as nested-lists. In order to avoid dumping such large structure as textual data we provide the <code>binary_output</code> constructor argument. If set to <code>True</code>, the output will be stored in the pickle format.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Pipeline.check_model_type"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 
7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>check_model_type</span></h4><!-- HTML_TAG_END --> <a id="transformers.Pipeline.check_model_type" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Pipeline.check_model_type"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L864" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">supported_models<span class="opacity-60">: typing.Union[typing.List[str], dict]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.check_model_type.supported_models" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.check_model_type.supported_models"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>supported_models</strong> (<code>List[str]</code> or <code>dict</code>) &#x2014; The list of models supported by the pipeline, or a dictionary with model class values.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Check if the model class is in supported by the pipeline.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Pipeline.device_placement"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 
dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>device_placement</span></h4><!-- HTML_TAG_END --> <a id="transformers.Pipeline.device_placement" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Pipeline.device_placement"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L806" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Context Manager allowing tensor allocation on the user-specified device in framework agnostic way.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># Explicitly ask for tensor allocation on CUDA 
device :0</span> pipe = pipeline(..., device=<span class="hljs-number">0</span>) <span class="hljs-keyword">with</span> pipe.device_placement(): <span class="hljs-comment"># Every framework specific tensor allocation will be done on the request device</span> output = pipe(...)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Pipeline.ensure_tensor_on_device"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>ensure_tensor_on_device</span></h4><!-- HTML_TAG_END --> <a id="transformers.Pipeline.ensure_tensor_on_device" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.Pipeline.ensure_tensor_on_device"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L832" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**inputs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, torch.Tensor]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.Pipeline.ensure_tensor_on_device.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.ensure_tensor_on_device.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (keyword arguments that should be <code>torch.Tensor</code>, the rest is ignored) &#x2014; The tensors to place on <code>self.device</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.ensure_tensor_on_device.Recursive" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.ensure_tensor_on_device.Recursive"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>Recursive</strong> on lists <strong>only</strong>. &#x2014;<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Pipeline.ensure_tensor_on_device.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, torch.Tensor]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The same as <code>inputs</code> but on the proper device.</p> <!-- HTML_TAG_END --></p></div></div> <p>Ensure PyTorch tensors are on the specified device.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Pipeline.postprocess"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 
18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>postprocess</span></h4><!-- HTML_TAG_END --> <a id="transformers.Pipeline.postprocess" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Pipeline.postprocess"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L920" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p 
class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_outputs<span class="opacity-60">: ModelOutput</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**postprocess_parameters<span class="opacity-60">: typing.Dict</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Postprocess will receive the raw outputs of the <code>_forward</code> method, generally tensors, and reformat them into something more friendly. Generally it will output a list or a dict or results (containing just strings and numbers).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Pipeline.predict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 
11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>predict</span></h4><!-- HTML_TAG_END --> <a id="transformers.Pipeline.predict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Pipeline.predict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L800" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">X<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Scikit / Keras interface to 
transformers’ pipelines. This method will forward to <strong>call</strong>().</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Pipeline.preprocess"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>preprocess</span></h4><!-- HTML_TAG_END --> <a id="transformers.Pipeline.preprocess" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Pipeline.preprocess"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 
0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L899" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_<span class="opacity-60">: typing.Any</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**preprocess_parameters<span class="opacity-60">: typing.Dict</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Preprocess will take the <code>input_</code> of a specific pipeline and return a dictionnary of everything necessary for <code>_forward</code> to run properly. 
It should contain at least one tensor, but might have arbitrary other items.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Pipeline.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.Pipeline.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Pipeline.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L770" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Pipeline.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Pipeline.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code>) &#x2014; A path to the directory where to saved. It will be created if it doesn&#x2019;t exist.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save the pipeline’s model and tokenizer.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Pipeline.transform"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 
11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transform</span></h4><!-- HTML_TAG_END --> <a id="transformers.Pipeline.transform" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Pipeline.transform"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L794" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">X<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative 
docstring-details "> </div></div> <p>Scikit / Keras interface to transformers’ pipelines. This method will forward to <strong>call</strong>().</p></div></div> <script type="module" data-hydrate="11f74yl"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="11f74yl"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/pipelines.mdx-dd4bd714.js") ], params: {} } }); </script>
428
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/model.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;models&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.PreTrainedModel&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;model-instantiation-dtype&quot;,&quot;title&quot;:&quot;Model Instantiation dtype&quot;}],&quot;title&quot;:&quot;PreTrainedModel&quot;},{&quot;local&quot;:&quot;transformers.modeling_utils.ModuleUtilsMixin&quot;,&quot;title&quot;:&quot;ModuleUtilsMixin&quot;},{&quot;local&quot;:&quot;transformers.TFPreTrainedModel&quot;,&quot;title&quot;:&quot;TFPreTrainedModel&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_utils.TFModelUtilsMixin&quot;,&quot;title&quot;:&quot;TFModelUtilsMixin&quot;},{&quot;local&quot;:&quot;transformers.FlaxPreTrainedModel&quot;,&quot;title&quot;:&quot;FlaxPreTrainedModel&quot;},{&quot;local&quot;:&quot;transformers.file_utils.PushToHubMixin&quot;,&quot;title&quot;:&quot;Pushing to the Hub&quot;}],&quot;title&quot;:&quot;Models&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/model.mdx-5e2df875.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link 
rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Models </span></h1> <p>The base classes <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>, <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a>, and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a> implement the common methods for loading/saving a model either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace’s AWS S3 repository).</p> <p><a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> and <a 
href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> also implement a few methods which are common among all the models to:</p> <ul><li>resize the input token embeddings when new tokens are added to the vocabulary</li> <li>prune the attention heads of the model.</li></ul> <p>The other methods that are common to each model are defined in <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.modeling_utils.ModuleUtilsMixin">ModuleUtilsMixin</a> (for the PyTorch models) and <code>TFModuleUtilsMixin</code> (for the TensorFlow models) or for text generation, <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin">GenerationMixin</a> (for the PyTorch models), <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin">TFGenerationMixin</a> (for the TensorFlow models) and <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_flax_utils.FlaxGenerationMixin">FlaxGenerationMixin</a> (for the Flax/JAX models).</p> <h2 class="relative group"><a id="transformers.PreTrainedModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PreTrainedModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PreTrainedModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 
0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L423" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*inputs<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base class for all models.</p> <p><a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to:</p> <ul><li>resize the input embeddings,</li> <li>prune heads in the self-attention heads.</li></ul> <p>Class attributes 
(overridden by derived classes):</p> <ul><li><p><strong>config_class</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) — A subclass of <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> to use as configuration class for this model architecture.</p></li> <li><p><strong>load_tf_weights</strong> (<code>Callable</code>) — A python <em>method</em> for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:</p> <ul><li><strong>model</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>) — An instance of the model on which to load the TensorFlow checkpoint.</li> <li><strong>config</strong> (<code>PreTrainedConfig</code>) — An instance of the configuration associated to the model.</li> <li><strong>path</strong> (<code>str</code>) — A path to the TensorFlow checkpoint.</li></ul></li> <li><p><strong>base_model_prefix</strong> (<code>str</code>) — A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.</p></li> <li><p><strong>is_parallelizable</strong> (<code>bool</code>) — A flag indicating whether this model supports model parallelization.</p></li> <li><p><strong>main_input_name</strong> (<code>str</code>) — The name of the principal input to the model (often <code>input_ids</code> for NLP models, <code>pixel_values</code> for vision models and <code>input_values</code> for speech models).</p></li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.PushToHubMixin.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 
dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.PushToHubMixin.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.PushToHubMixin.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_path_or_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_url<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">organization<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_path_or_name</strong> 
(<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your model in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_url" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_url"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. 
If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. 
This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add model&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.organization" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.organization"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your model (you must be a member of this organization).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.file_utils.PushToHubMixin.push_to_hub.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The url of the commit of your model in the given repository.</p> <!-- HTML_TAG_END --></p></div></div> <p>Upload the model checkpoint to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel model = AutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center 
text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1091" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike, NoneType]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*model_args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>tensorflow index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_tf</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. 
This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.</li> <li>A path or url to a model folder containing a <em>flax checkpoint file</em> in <em>.msgpack</em> format (e.g, <code>./flax_model/</code> containing <code>flax_model.msgpack</code>). In this case, <code>from_flax</code> should be set to <code>True</code>.</li> <li><code>None</code> if you are both providing the configuration and state dictionary (resp. with keyword arguments <code>config</code> and <code>state_dict</code>).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.model_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.model_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the 
underlying model&#x2019;s <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.from_pretrained.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.from_pretrained.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<code>Union[PretrainedConfig, str, os.PathLike]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string or path valid as input to <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate a pretrained pytorch model from a pre-trained model configuration.</p> <p>The model is set in evaluation mode by default using <code>model.eval()</code> (Dropout 
modules are deactivated). To train the model, you should first set it back in training mode with <code>model.train()</code>.</p> <p>The warning <em>Weights from XXX not initialized from pretrained model</em> means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.</p> <p>The warning <em>Weights from XXX not used in YYY</em> means that the layer XXX is not used by YYY, therefore those weights are discarded.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Passing `use_auth_token=True“ is required when you want to use a private model.</p></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Activate the special <a href="https://huggingface.co/transformers/installation.html#offline-mode" rel="nofollow">“offline-mode”</a> to use this method in a firewalled environment.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" 
width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, BertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = 
BertConfig.from_json_file(<span class="hljs-string">&quot;./tf_model/my_tf_model_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;./tf_model/my_tf_checkpoint.ckpt.index&quot;</span>, from_tf=<span class="hljs-literal">True</span>, config=config) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a Flax checkpoint file instead of a PyTorch model (slower)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, from_flax=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.get_input_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 
7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_input_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.get_input_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.get_input_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L575" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>nn.Module</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" 
id="transformers.PreTrainedModel.get_input_embeddings.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>nn.Module</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A torch module mapping vocabulary to hidden states.</p> <!-- HTML_TAG_END --></p></div></div> <p>Returns the model’s input embeddings.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.get_output_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>get_output_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.get_output_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.get_output_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L601" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>nn.Module</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedModel.get_output_embeddings.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>nn.Module</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A torch module mapping hidden states to vocabulary.</p> <!-- HTML_TAG_END --></p></div></div> <p>Returns the model’s output embeddings.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.gradient_checkpointing_disable"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>gradient_checkpointing_disable</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.gradient_checkpointing_disable" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.PreTrainedModel.gradient_checkpointing_disable"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L980" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Deactivates gradient checkpointing for the current model.</p> <p>Note that in other frameworks this feature can be referred to as “activation checkpointing” or “checkpoint activations”.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.gradient_checkpointing_enable"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block 
-mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>gradient_checkpointing_enable</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.gradient_checkpointing_enable" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.gradient_checkpointing_enable"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L969" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Activates gradient checkpointing for the current model.</p> <p>Note that in other frameworks this feature can be referred to as “activation checkpointing” or “checkpoint activations”.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.init_weights"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 
7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>init_weights</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.init_weights" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.init_weights"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L936" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>If needed prunes and maybe initializes weights.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.post_init"><!-- 
HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>post_init</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.post_init" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.post_init"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L494" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A method executed at the end of each Transformer model initialization, to execute code that needs the model’s modules properly initialized (such as weight initialization).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.prune_heads"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 
20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prune_heads</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.prune_heads" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.prune_heads"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L952" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">heads_to_prune<span 
class="opacity-60">: typing.Dict[int, typing.List[int]]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.prune_heads.heads_to_prune" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.prune_heads.heads_to_prune"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>heads_to_prune</strong> (<code>Dict[int, List[int]]</code>) &#x2014; Dictionary with keys being selected layer indices (<code>int</code>) and associated values being the list of heads to prune in said layer (list of <code>int</code>). 
For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Prunes heads of the base model.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.register_for_auto_class"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>register_for_auto_class</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.register_for_auto_class" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.PreTrainedModel.register_for_auto_class"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1817" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_class<span class="opacity-60"> = &#39;AutoModel&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.register_for_auto_class.auto_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedModel.register_for_auto_class.auto_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoModel&quot;</code>) &#x2014; The auto class to register this new model with.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Register this class with a given auto class. 
This should only be used for custom models as the ones in the library are already mapped with an auto class.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This API is experimental and may have some slight breaking changes in the next releases.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.resize_token_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>resize_token_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.resize_token_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.resize_token_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L729" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">new_num_tokens<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.nn.Embedding</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto 
border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.resize_token_embeddings.new_num_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.resize_token_embeddings.new_num_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>new_num_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. 
If not provided or <code>None</code>, just returns a pointer to the input tokens <code>torch.nn.Embedding</code> module of the model without doing anything.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedModel.resize_token_embeddings.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.nn.Embedding</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Pointer to the input tokens Embeddings Module of the model.</p> <!-- HTML_TAG_END --></p></div></div> <p>Resizes input token embeddings matrix of the model if <code>new_num_tokens != config.vocab_size</code>.</p> <p>Takes care of tying weights embeddings afterwards if the model class has a <code>tie_weights()</code> method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 
20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1000" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">save_directory<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_config<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">state_dict<span class="opacity-60">: typing.Optional[dict] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_function<span class="opacity-60">: typing.Callable = &lt;function save at 0x7f461a422820&gt;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory to which to save. Will be created if it doesn&#x2019;t exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.save_config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.save_config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_config</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to save the config of the model. Useful when in distributed training like TPUs and need to call this function on all processes. In this case, set <code>save_config=True</code> only on the main process to avoid race conditions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.state_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.state_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>state_dict</strong> (nested dictionary of <code>torch.Tensor</code>) &#x2014; The state dictionary of the model to save. 
Will default to <code>self.state_dict()</code>, but can be used to only save parts of the model or if special precautions need to be taken when recovering the state dictionary of a model (like when using model parallelism).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.save_function" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.save_function"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_function</strong> (<code>Callable</code>) &#x2014; The function to use to save the state dictionary. 
Useful on distributed training like TPUs when one need to replace <code>torch.save</code> by another method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are 
pushing to if it&#x2019;s an existing folder. Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save a model and its configuration file to a directory, so that it can be re-loaded using the <code>[from_pretrained()](/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained)</code> class method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.set_input_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 
14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_input_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.set_input_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.set_input_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L588" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60">: Module</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedModel.set_input_embeddings.value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedModel.set_input_embeddings.value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>value</strong> (<code>nn.Module</code>) &#x2014; A module mapping vocabulary to hidden states.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Set model’s input embeddings.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedModel.tie_weights"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 
4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tie_weights</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedModel.tie_weights" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedModel.tie_weights"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex 
items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L616" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Tie the weights between the input embeddings and the output embeddings.</p> <p>If the <code>torchscript</code> flag is set in the configuration, can’t handle parameter sharing so we are cloning the weights instead.</p></div></div> <a id="from_pretrained-torch-dtype"></a> <h3 class="relative group"><a id="model-instantiation-dtype" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#model-instantiation-dtype"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Model Instantiation dtype </span></h3> <p>Under Pytorch a model normally gets instantiated with <code>torch.float32</code> format. This can be an issue if one tries to load a model whose weights are in fp16, since it’d require twice as much memory. 
To overcome this limitation, you can either explicitly pass the desired <code>dtype</code> using <code>torch_dtype</code> argument:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;t5&quot;</span>, torch_dtype=torch.float16)<!-- HTML_TAG_END --></pre></div> <p>or, if you want the model to always load in the most optimal memory pattern, you can use the special value <code>&quot;auto&quot;</code>, and then <code>dtype</code> will be automatically derived from the model’s weights:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" 
type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->model = T5ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;t5&quot;</span>, torch_dtype=<span class="hljs-string">&quot;auto&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Models instantiated from scratch can also be told which <code>dtype</code> to use with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute 
pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->config = T5Config.from_pretrained(<span class="hljs-string">&quot;t5&quot;</span>) model = AutoModel.from_config(config)<!-- HTML_TAG_END --></pre></div> <p>Due to Pytorch design, this functionality is only available for floating dtypes.</p> <h2 class="relative group"><a id="transformers.modeling_utils.ModuleUtilsMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ModuleUtilsMixin </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin"><!-- HTML_TAG_START 
--><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">ModuleUtilsMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L151" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A few utilities for <code>torch.nn.Modules</code>, to be used as a mixin.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 
18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>add_memory_hooks</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L182" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.</p> <p>Increase in memory consumption is stored in a <code>mem_rss_diff</code> attribute for each module and can be reset to zero with 
<code>model.reset_memory_hooks_state()</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>estimate_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L378" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_dict<span class="opacity-60">: typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>dict</code>) &#x2014; The model inputs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.ModuleUtilsMixin.estimate_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The total number of tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Helper function to estimate the total number of tokens from the model inputs.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 
font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>floating_point_ops</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L396" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_dict<span class="opacity-60">: typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">exclude_embeddings<span class="opacity-60">: bool = True</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>) &#x2014; The batch size for the forward pass.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.sequence_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.sequence_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequence_length</strong> (<code>int</code>) &#x2014; The number of tokens in each line of the batch.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.exclude_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.exclude_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>exclude_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to count embedding and softmax operations.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.ModuleUtilsMixin.floating_point_ops.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 
border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The number of floating-point operations.</p> <!-- HTML_TAG_END --></p></div></div> <p>Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a batch with this transformer model. Default approximation neglects the quadratic dependency on the number of tokens (valid if <code>12 * d_model &lt;&lt; sequence_length</code>) as laid out in <a href="https://arxiv.org/pdf/2001.08361.pdf" rel="nofollow">this paper</a> section 2.1. Should be overridden for transformers with parameter re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 
21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_extended_attention_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L271" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple[int]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: &lt;property object at 0x7f45b4efb950&gt;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask.attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask.attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attention_mask</strong> (<code>torch.Tensor</code>) &#x2014; Mask with ones indicating 
tokens to attend to, zeros for tokens to ignore.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask.input_shape" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.get_extended_attention_mask.input_shape"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_shape</strong> (<code>Tuple[int]</code>) &#x2014; The shape of the input to the model. 
device &#x2014; (<code>torch.device</code>): The device of the input to the model.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Makes broadcastable attention and causal masks so that future and masked tokens are ignored.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.get_head_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_head_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin.get_head_mask" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.modeling_utils.ModuleUtilsMixin.get_head_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L314" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_mask<span class="opacity-60">: typing.Optional[torch.Tensor]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_hidden_layers<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_attention_chunked<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span 
class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.get_head_mask.head_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.get_head_mask.head_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_mask</strong> (<code>torch.Tensor</code> with shape <code>[num_heads]</code> or <code>[num_hidden_layers x num_heads]</code>, <em>optional</em>) &#x2014; The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.get_head_mask.num_hidden_layers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_utils.ModuleUtilsMixin.get_head_mask.num_hidden_layers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_hidden_layers</strong> (<code>int</code>) &#x2014; The number of hidden layers in the model. is_attention_chunked &#x2014; (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether or not the attentions scores are computed by chunks or not.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Prepare the head mask if needed.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.invert_attention_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 
27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>invert_attention_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin.invert_attention_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin.invert_attention_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L218" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attention_mask<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.Tensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.invert_attention_mask.encoder_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.invert_attention_mask.encoder_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attention_mask</strong> (<code>torch.Tensor</code>) &#x2014; An attention mask.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.ModuleUtilsMixin.invert_attention_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.Tensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The inverted attention mask.</p> <!-- HTML_TAG_END --></p></div></div> <p>Invert an attention mask (e.g., switches 0. and 1.).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.num_parameters"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 
11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>num_parameters</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin.num_parameters" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin.num_parameters"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L352" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">only_trainable<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">exclude_embeddings<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.num_parameters.only_trainable" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.num_parameters.only_trainable"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>only_trainable</strong> 
(<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return only the number of trainable parameters<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.ModuleUtilsMixin.num_parameters.exclude_embeddings" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.ModuleUtilsMixin.num_parameters.exclude_embeddings"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>exclude_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return only the number of non-embeddings parameters<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.ModuleUtilsMixin.num_parameters.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The number of parameters.</p> <!-- HTML_TAG_END --></p></div></div> <p>Get number of (optionally, trainable or non-embeddings) parameters in the module.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.ModuleUtilsMixin.reset_memory_hooks_state"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>reset_memory_hooks_state</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.ModuleUtilsMixin.reset_memory_hooks_state" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.ModuleUtilsMixin.reset_memory_hooks_state"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L194" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Reset the <code>mem_rss_diff</code> attribute of each module (see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks">add_memory_hooks()</a>).</p></div></div> <h2 class="relative group"><a id="transformers.TFPreTrainedModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFPreTrainedModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span 
class="font-semibold">TFPreTrainedModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L686" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base class for all TF models.</p> <p><a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel">TFPreTrainedModel</a> takes care of storing 
the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to:</p> <ul><li>resize the input embeddings,</li> <li>prune heads in the self-attention heads.</li></ul> <p>Class attributes (overridden by derived classes):</p> <ul><li><strong>config_class</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) — A subclass of <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> to use as configuration class for this model architecture.</li> <li><strong>base_model_prefix</strong> (<code>str</code>) — A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.</li> <li><strong>main_input_name</strong> (<code>str</code>) — The name of the principal input to the model (often <code>input_ids</code> for NLP models, <code>pixel_values</code> for vision models and <code>input_values</code> for speech models).</li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.PushToHubMixin.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path 
fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.PushToHubMixin.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.PushToHubMixin.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs 
md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_path_or_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_url<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">organization<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative 
docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your model in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). 
If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_url" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_url"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. 
If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. 
This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add model&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.organization" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.organization"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your model (you must be a member of this organization).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.file_utils.PushToHubMixin.push_to_hub.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The url of the commit of your model in the given repository.</p> <!-- HTML_TAG_END --></p></div></div> <p>Upload the model checkpoint to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModel model = TFAutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center 
text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.compile"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>compile</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.compile" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.compile"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L877" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60"> = &#39;rmsprop&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60"> = &#39;passthrough&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metrics<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss_weights<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weighted_metrics<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">run_eagerly<span class="opacity-60"> = 
None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">steps_per_execution<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This is a thin wrapper that sets the model’s loss output head as the loss if the user does not specify a loss function themselves.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 
8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1439" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*model_args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>PyTorch state_dict save file</em> (e.g, <code>./pt_model/pytorch_model.bin</code>). In this case, <code>from_pt</code> should be set to <code>True</code> and a configuration object should be provided as <code>config</code> argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.</li> <li><code>None</code> if you are both providing the configuration and state dictionary (resp. with keyword arguments <code>config</code> and <code>state_dict</code>).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.model_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.model_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.from_pretrained.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.from_pretrained.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<code>Union[PretrainedConfig, str]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a 
href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string valid as input to <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.</p> <p>The warning <em>Weights from XXX not initialized from pretrained model</em> means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.</p> <p>The warning <em>Weights from XXX not used in YYY</em> means that the layer XXX is not used by YYY, therefore those weights are discarded.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Passing <code>use_auth_token=True</code> is required when you want to use a private model.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity 
bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, TFBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Update configuration during loading.</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">assert</span> model.config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_json_file(<span 
class="hljs-string">&quot;./pt_model/my_pt_model_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFBertModel.from_pretrained(<span class="hljs-string">&quot;./pt_model/my_pytorch_model.bin&quot;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.get_bias"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_bias</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_bias" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_bias"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1123" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Variable</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFPreTrainedModel.get_bias.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Variable</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The weights representing the bias, None if not an LM model.</p> <!-- HTML_TAG_END --></p></div></div> <p>Dict of bias attached to an 
LM head. The key represents the name of the bias attribute.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.get_input_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_input_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_input_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_input_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L796" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Variable</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFPreTrainedModel.get_input_embeddings.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Variable</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The embeddings layer mapping vocabulary to hidden states.</p> <!-- HTML_TAG_END --></p></div></div> <p>Returns the model’s input embeddings layer.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.TFPreTrainedModel.get_lm_head"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_lm_head</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_lm_head" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_lm_head"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1156" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.keras.layers.Layer</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFPreTrainedModel.get_lm_head.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.keras.layers.Layer</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The LM head layer if the model has one, None if not.</p> <!-- HTML_TAG_END --></p></div></div> <p>The LM Head layer. 
This method must be overwritten by all the models that have a lm head.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.get_output_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_output_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_output_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_output_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1063" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Variable</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFPreTrainedModel.get_output_embeddings.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Variable</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The new weights mapping vocabulary to hidden states.</p> <!-- HTML_TAG_END --></p></div></div> <p>Returns the model’s output embeddings</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.TFPreTrainedModel.get_output_layer_with_bias"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_output_layer_with_bias</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_output_layer_with_bias" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_output_layer_with_bias"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1100" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.keras.layers.Layer</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFPreTrainedModel.get_output_layer_with_bias.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.keras.layers.Layer</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The layer that handles the bias, None if not an LM model.</p> <!-- HTML_TAG_END --></p></div></div> <p>Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.get_prefix_bias_name"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 
break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_prefix_bias_name</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.get_prefix_bias_name" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.get_prefix_bias_name"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1113" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFPreTrainedModel.get_prefix_bias_name.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The _prefix name of the bias.</p> <!-- HTML_TAG_END --></p></div></div> <p>Get the concatenated _prefix name of the bias from the model name to the parent layer</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.load_repo_checkpoint"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>load_repo_checkpoint</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.load_repo_checkpoint" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.load_repo_checkpoint"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L823" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_path_or_name<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>dict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.load_repo_checkpoint.repo_path_or_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.load_repo_checkpoint.repo_path_or_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_path_or_name</strong> (<code>str</code>) &#x2014; Can either be a repository name for your {object} in the Hub or a path to a local folder (in which case the repository will have the name of that local folder).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFPreTrainedModel.load_repo_checkpoint.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>dict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A dictionary of extra metadata from the checkpoint, most commonly an “epoch” count.</p> <!-- HTML_TAG_END --></p></div></div> <p>Loads a saved checkpoint (model weights and optimizer state) from a repo. 
Returns the current epoch count when the checkpoint was made.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.prune_heads"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prune_heads</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.prune_heads" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.prune_heads"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1365" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">heads_to_prune<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.prune_heads.heads_to_prune" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.prune_heads.heads_to_prune"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>heads_to_prune</strong> (<code>Dict[int, List[int]]</code>) &#x2014; Dictionary with keys being selected layer indices (<code>int</code>) and associated values being the list of heads to prune in said layer (list of <code>int</code>). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Prunes heads of the base model.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.resize_token_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 
13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>resize_token_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.resize_token_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.resize_token_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1165" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">new_num_tokens<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Variable</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.resize_token_embeddings.new_num_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.resize_token_embeddings.new_num_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>new_num_tokens</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of new tokens in the embedding matrix. 
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or <code>None</code>, just returns a pointer to the input tokens <code>tf.Variable</code> module of the model without doing anything.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFPreTrainedModel.resize_token_embeddings.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Variable</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Pointer to the input tokens Embeddings Module of the model.</p> <!-- HTML_TAG_END --></p></div></div> <p>Resizes input token embeddings matrix of the model if <code>new_num_tokens != config.vocab_size</code>.</p> <p>Takes care of tying weights embeddings afterwards if the model class has a <code>tie_weights()</code> method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 
16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1377" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">saved_model<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">version<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code>) &#x2014; Directory to which to save. Will be created if it doesn&#x2019;t exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.save_pretrained.saved_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.save_pretrained.saved_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>saved_model</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If the model has to be saved in saved model format as well or not.<!-- HTML_TAG_END --> </span></span> 
</li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.save_pretrained.version" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.save_pretrained.version"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>version</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The version of the saved model. 
A saved model needs to be versioned in order to be properly loaded by TensorFlow Serving as detailed in the official documentation <a href="https://www.tensorflow.org/tfx/serving/serving_basic" rel="nofollow">https://www.tensorflow.org/tfx/serving/serving_basic</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will 
synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save a model and its configuration file to a directory, so that it can be re-loaded using the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">from_pretrained()</a> class method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.serving"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 
7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>serving</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.serving" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.serving"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L765" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 
!mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.serving.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.serving.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>Dict[str, tf.Tensor]</code>) &#x2014; The input of the saved model as a dictionary of tensors.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Method used for serving the model.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.serving_output"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 
33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>serving_output</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.serving_output" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.serving_output"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 
0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L786" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.serving_output.output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.serving_output.output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output</strong> (<code>TFBaseModelOutput</code>) &#x2014; The output returned by the model.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Prepare the output of the saved model. Each model must implement this function.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.set_bias"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_bias</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.set_bias" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.set_bias"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1140" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.set_bias.value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TFPreTrainedModel.set_bias.value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>value</strong> (<code>Dict[tf.Variable]</code>) &#x2014; All the new bias attached to an LM head.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Set all the bias in the LM head.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.set_input_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 
13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_input_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.set_input_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.set_input_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1043" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.set_input_embeddings.value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.set_input_embeddings.value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>value</strong> (<code>tf.Variable</code>) &#x2014; The new weights mapping hidden states to vocabulary.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Set model’s input embeddings</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 
pt-3 px-2.5" id="transformers.TFPreTrainedModel.set_output_embeddings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_output_embeddings</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.set_output_embeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.set_output_embeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1083" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFPreTrainedModel.set_output_embeddings.value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFPreTrainedModel.set_output_embeddings.value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>value</strong> (<code>tf.Variable</code>) &#x2014; The new weights mapping hidden states to vocabulary.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Set model’s output embeddings</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.test_step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 
14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>test_step</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.test_step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.test_step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L977" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A modification of Keras’s default test_step that cleans up the printed metrics when we use a dummy loss.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 
items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFPreTrainedModel.train_step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>train_step</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFPreTrainedModel.train_step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFPreTrainedModel.train_step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L926" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A modification of Keras’s default <code>train_step</code> that cleans up the printed metrics when we use a dummy loss. If a user specifies a loss at model compile time, this function behaves as the original Keras <code>train_step</code>. In this case, it expects the same <code>data</code> as the original function (i.e. <code>(inputs, labels)</code>).</p> <p>However, when the model is compiled without specifying the loss AND the expected label columns are passed as part of the input dictionary, the loss is computed internally (inside the model class) and is used in the backwards pass. 
In this case, <code>data</code> is a singleton tuple containing <code>(inputs,)</code>.</p> <p>This is possible under the aforementioned circumstances because our overriden compile function can set an additional loss function that reduces a <code>loss</code> output, and the model will output a <code>loss</code> component (notice the name matching) containing the loss that was used to train the pre-trained model.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_tf_utils.TFModelUtilsMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.TFModelUtilsMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFModelUtilsMixin </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFModelUtilsMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 
text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_utils.</span><span class="font-semibold">TFModelUtilsMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFModelUtilsMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFModelUtilsMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L83" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A few utilities for <code>tf.keras.Model</code>, to be used as a mixin.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>num_parameters</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L88" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">only_trainable<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex 
items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters.only_trainable" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters.only_trainable"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>only_trainable</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return only the number of trainable parameters<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_tf_utils.TFModelUtilsMixin.num_parameters.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p 
class="text-base"><!-- HTML_TAG_START --> <p>The number of parameters.</p> <!-- HTML_TAG_END --></p></div></div> <p>Get the number of (optionally, trainable) parameters in the model.</p></div></div> <h2 class="relative group"><a id="transformers.FlaxPreTrainedModel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxPreTrainedModel </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" 
d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxPreTrainedModel</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L72" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60">: Module</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_shape<span class="opacity-60">: typing.Tuple = (1, 1)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base class for all models.</p> <p><a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel">FlaxPreTrainedModel</a> takes care of storing the configuration of the models and handles methods for loading, downloading and saving models.</p> <p>Class attributes (overridden by derived classes):</p> <ul><li><strong>config_class</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) — A subclass of <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> to use as configuration class for this model architecture.</li> <li><strong>base_model_prefix</strong> (<code>str</code>) — A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the 
base model.</li> <li><strong>main_input_name</strong> (<code>str</code>) — The name of the principal input to the model (often <code>input_ids</code> for NLP models, <code>pixel_values</code> for vision models and <code>input_values</code> for speech models).</li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.PushToHubMixin.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.PushToHubMixin.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.file_utils.PushToHubMixin.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_path_or_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_url<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span 
class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">organization<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your model in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_url" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_url"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. 
This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add model&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.organization" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.organization"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your model (you must be a member of this organization).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.file_utils.PushToHubMixin.push_to_hub.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The url of the commit of your model in the given repository.</p> <!-- HTML_TAG_END --></p></div></div> <p>Upload the model checkpoint to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxAutoModel model = FlaxAutoModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the model to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the model to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> model.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center 
text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L296" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dtype<span class="opacity-60">: dtype = &lt;class &#39;jax._src.numpy.lax_numpy.float32&#39;&gt;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*model_args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing model weights saved using <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.save_pretrained">save_pretrained()</a>, e.g., <code>./my_model_directory/</code>.</li> <li>A path or url to a <em>pt index checkpoint file</em> (e.g, <code>./tf_model/model.ckpt.index</code>). In this case, <code>from_pt</code> should be set to <code>True</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.dtype" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.dtype"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dtype</strong> (<code>jax.numpy.dtype</code>, <em>optional</em>, defaults to <code>jax.numpy.float32</code>) &#x2014; The data type of 
the computation. Can be one of <code>jax.numpy.float32</code>, <code>jax.numpy.float16</code> (on GPUs) and <code>jax.numpy.bfloat16</code> (on TPUs).</p> <p>This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given <code>dtype</code>.</p> <p><strong>Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.</strong></p> <p>If you wish to change the dtype of the model parameters, see <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_fp16">to_fp16()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.to_bf16">to_bf16()</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.model_args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.model_args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>model_args</strong> (sequence of positional arguments, <em>optional</em>) &#x2014; All remaining positional arguments will be passed to the underlying model&#x2019;s <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.from_pretrained.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.from_pretrained.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<code>Union[PretrainedConfig, str, os.PathLike]</code>, <em>optional</em>) &#x2014; Can be either:</p> <ul> <li>an instance of a class derived from <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>,</li> <li>a string or path valid as input to <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> 
<p>Instantiate a pretrained flax model from a pre-trained model configuration.</p> <p>The warning <em>Weights from XXX not initialized from pretrained model</em> means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.</p> <p>The warning <em>Weights from XXX not used in YYY</em> means that the layer XXX is not used by YYY, therefore those weights are discarded.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BertConfig, FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co and cache.</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Model was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)* (for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Loading from a PyTorch checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).</span> <span class="hljs-meta">&gt;&gt;&gt; </span>config = BertConfig.from_json_file(<span class="hljs-string">&quot;./pt_model/config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;./pt_model/pytorch_model.bin&quot;</span>, from_pt=<span class="hljs-literal">True</span>, config=config)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.register_for_auto_class"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" 
d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>register_for_auto_class</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.register_for_auto_class" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.register_for_auto_class"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L716" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_class<span class="opacity-60"> = &#39;FlaxAutoModel&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.register_for_auto_class.auto_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.register_for_auto_class.auto_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;FlaxAutoModel&quot;</code>) &#x2014; The auto class to register this new model with.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Register this class 
with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This API is experimental and may have some slight breaking changes in the next releases.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L659" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory to which to save. 
Will be created if it doesn&#x2019;t exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. 
Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save a model and its configuration file to a directory, so that it can be re-loaded using the <code>[from_pretrained()](/docs/transformers/pr_16143/en/main_classes/model#transformers.FlaxPreTrainedModel.from_pretrained)</code> class method</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.to_bf16"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 
12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_bf16</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.to_bf16" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.to_bf16"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L191" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask<span class="opacity-60">: typing.Any = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative 
docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_bf16.params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_bf16.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_bf16.mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_bf16.mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Cast the floating-point <code>params</code> to <code>jax.numpy.bfloat16</code>. 
This returns a new <code>params</code> tree and does not cast the <code>params</code> in place.</p> <p>This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># 
By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_bf16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If you want don&#x27;t want to cast certain parameters (for example layer norm bias and scale)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># then pass the mask as follows</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> flax <span class="hljs-keyword">import</span> traverse_util <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>flat_params = traverse_util.flatten_dict(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span>mask = { <span class="hljs-meta">... </span> path: (path[-<span class="hljs-number">2</span>] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;bias&quot;</span>) <span class="hljs-keyword">and</span> path[-<span class="hljs-number">2</span>:] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;scale&quot;</span>)) <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> path <span class="hljs-keyword">in</span> flat_params <span class="hljs-meta">... 
</span>} <span class="hljs-meta">&gt;&gt;&gt; </span>mask = traverse_util.unflatten_dict(mask) <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_bf16(model.params, mask)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.to_fp16"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_fp16</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.to_fp16" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.to_fp16"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L257" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask<span class="opacity-60">: typing.Any = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_fp16.params" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_fp16.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_fp16.mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_fp16.mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Cast the floating-point <code>parmas</code> to <code>jax.numpy.float16</code>. This returns a new <code>params</code> tree and does not cast the <code>params</code> in place.</p> <p>This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full half-precision training or to save weights in float16 for inference in order to save memory and improve speed.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full 
transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># load model</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># By default, the model params will be in fp32, to cast these to float16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># If you want don&#x27;t want to cast certain parameters (for example layer norm bias and scale)</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># then pass the mask as follows</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> flax <span class="hljs-keyword">import</span> traverse_util <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>flat_params = traverse_util.flatten_dict(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span>mask = { <span class="hljs-meta">... 
</span> path: (path[-<span class="hljs-number">2</span>] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;bias&quot;</span>) <span class="hljs-keyword">and</span> path[-<span class="hljs-number">2</span>:] != (<span class="hljs-string">&quot;LayerNorm&quot;</span>, <span class="hljs-string">&quot;scale&quot;</span>)) <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> path <span class="hljs-keyword">in</span> flat_params <span class="hljs-meta">... </span>} <span class="hljs-meta">&gt;&gt;&gt; </span>mask = traverse_util.unflatten_dict(mask) <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp16(model.params, mask)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxPreTrainedModel.to_fp32"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 
7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_fp32</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxPreTrainedModel.to_fp32" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxPreTrainedModel.to_fp32"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_utils.py#L230" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">mask<span class="opacity-60">: typing.Any = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_fp32.params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxPreTrainedModel.to_fp32.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> of model parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxPreTrainedModel.to_fp32.mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.FlaxPreTrainedModel.to_fp32.mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask</strong> (<code>Union[Dict, FrozenDict]</code>) &#x2014; A <code>PyTree</code> with same structure as the <code>params</code> tree. The leaves should be booleans, <code>True</code> for params you want to cast, and should be <code>False</code> for those you want to skip<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Cast the floating-point <code>parmas</code> to <code>jax.numpy.float32</code>. This method can be used to explicitly convert the model parameters to fp32 precision. 
This returns a new <code>params</code> tree and does not cast the <code>params</code> in place.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> FlaxBertModel <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Download model and configuration from huggingface.co</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model = FlaxBertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># By default, the model params will be in fp32, to illustrate the use of this method,</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># we&#x27;ll first 
cast to fp16 and back to fp32</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_f16(model.params) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># now cast back to fp32</span> <span class="hljs-meta">&gt;&gt;&gt; </span>model.params = model.to_fp32(model.params)<!-- HTML_TAG_END --></pre></div></div></div> <h2 class="relative group"><a id="transformers.file_utils.PushToHubMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pushing to the Hub </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.PushToHubMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.file_utils.</span><span class="font-semibold">PushToHubMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.file_utils.PushToHubMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.PushToHubMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2837" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A Mixin containing the functionality to push a model or tokenizer to the hub.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.PushToHubMixin.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.file_utils.PushToHubMixin.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.PushToHubMixin.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_path_or_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_url<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">organization<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your {object} in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_url" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_url"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) 
&#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add {object}&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.organization" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.organization"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your {object} (you must be a member of this organization).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.file_utils.PushToHubMixin.push_to_hub.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The url of the commit of your {object} in the given repository.</p> <!-- HTML_TAG_END --></p></div></div> <p>Upload the {object_files} to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> {object_class} {<span class="hljs-built_in">object</span>} = {object_class}.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the {object} to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the {object} to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the {object} to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> {<span class="hljs-built_in">object</span>}.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span 
class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <script type="module" data-hydrate="17uz9cw"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="17uz9cw"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/model.mdx-5e2df875.js") ], params: {} } }); </script>
429
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/configuration.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;configuration&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.PretrainedConfig&quot;,&quot;title&quot;:&quot;PretrainedConfig&quot;}],&quot;title&quot;:&quot;Configuration&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/configuration.mdx-5be476eb.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="configuration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#configuration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 
1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Configuration </span></h1> <p>The base class <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> implements the common methods for loading/saving a configuration either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace’s AWS S3 repository).</p> <p>Each derived config class implements model specific attributes. Common attributes present in all config classes are: <code>hidden_size</code>, <code>num_attention_heads</code>, and <code>num_hidden_layers</code>. 
Text models further implement: <code>vocab_size</code>.</p> <h2 class="relative group"><a id="transformers.PretrainedConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PretrainedConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" 
fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PretrainedConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L53" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span 
class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>name_or_path</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Store the string that was passed to <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.from_pretrained">PreTrainedModel.from_pretrained()</a> or <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.TFPreTrainedModel.from_pretrained">TFPreTrainedModel.from_pretrained()</a> as <code>pretrained_model_name_or_path</code> if the configuration was created with such a 
method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.output_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.output_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_hidden_states</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should return all hidden-states.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.output_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.output_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_attentions</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the model should returns all attentions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not the model should return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.is_encoder_decoder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.is_encoder_decoder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_encoder_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model is used as an encoder/decoder or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.is_decoder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PretrainedConfig.is_decoder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the model is used as decoder or not (in which case it&#x2019;s used as an encoder).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.cross_attention_hidden_size**" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.cross_attention_hidden_size**"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attention_hidden_size**</strong> (<code>bool</code>, <em>optional</em>) &#x2014; The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder setting and the cross-attention hidden dimension differs from <code>self.config.hidden_size</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.add_cross_attention" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.add_cross_attention"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_cross_attention</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether cross-attention layers should 
be added to the model. Note, this option is only relevant for models that can be used as decoder models within the <a href="/docs/transformers/pr_16143/en/model_doc/encoder-decoder#transformers.EncoderDecoderModel">EncoderDecoderModel</a> class, which consists of all models in <code>AUTO_MODELS_FOR_CAUSAL_LM</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.tie_encoder_decoder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.tie_encoder_decoder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tie_encoder_decoder</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether all encoder weights should be tied to their equivalent decoder weights. 
This requires the encoder and decoder model to have the exact same parameter names.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.prune_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.prune_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prune_heads</strong> (<code>Dict[int, List[int]]</code>, <em>optional</em>, defaults to <code>{}</code>) &#x2014; Pruned heads of the model. 
The keys are the selected layer indices and the associated values, the list of heads to prune in said layer.</p> <p>For instance <code>{1: [0, 2], 2: [2, 3]}</code> will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.chunk_size_feed_forward" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.chunk_size_feed_forward"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>chunk_size_feed_forward</strong> (<code>int</code>, <em>optional</em>, defaults to <code>0</code>) &#x2014; The chunk size of all feed forward layers in the residual attention blocks. A chunk size of <code>0</code> means that the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes <code>n</code> &lt; sequence_length embeddings at a time. 
For more information on feed forward chunking, see <a href="../glossary.html#feed-forward-chunking">How does Feed Forward Chunking work?</a>.<!-- HTML_TAG_END --> </span></span> </li></ul> <p class="flex items-center font-semibold">Parameters for sequence generation <span class="flex-auto border-t-2 ml-3"></span></p> <p><!-- HTML_TAG_START --> <ul> <li><strong>max_length</strong> (<code>int</code>, <em>optional</em>, defaults to 20) — Maximum length that will be used by default in the <code>generate</code> method of the model.</li> <li><strong>min_length</strong> (<code>int</code>, <em>optional</em>, defaults to 10) — Minimum length that will be used by default in the <code>generate</code> method of the model.</li> <li><strong>do_sample</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — Flag that will be used by default in the <code>generate</code> method of the model. Whether or not to use sampling ; use greedy decoding otherwise.</li> <li><strong>early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — Flag that will be used by default in the <code>generate</code> method of the model. Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.</li> <li><strong>num_beams</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — Number of beams for beam search that will be used by default in the <code>generate</code> method of the model. 1 means no beam search.</li> <li><strong>num_beam_groups</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams that will be used by default in the <code>generate</code> method of the model. 1 means no group beam search.</li> <li><strong>diversity_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — Value to control diversity for group beam search. 
that will be used by default in the <code>generate</code> method of the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.</li> <li><strong>temperature</strong> (<code>float</code>, <em>optional</em>, defaults to 1) — The value used to module the next token probabilities that will be used by default in the <code>generate</code> method of the model. Must be strictly positive.</li> <li><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in the <code>generate</code> method of the model.</li> <li><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1) — Value that will be used by default in the <code>generate</code> method of the model for <code>top_p</code>. If set to float < 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.</li> <li><strong>repetition_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1) — Parameter for repetition penalty that will be used by default in the <code>generate</code> method of the model. 1.0 means no penalty.</li> <li><strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1) — Exponential penalty to the length that will be used by default in the <code>generate</code> method of the model.</li> <li><strong>no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) — Value that will be used by default in the — <code>generate</code> method of the model for <code>no_repeat_ngram_size</code>. If set to int > 0, all ngrams of that size can only occur once.</li> <li><strong>encoder_no_repeat_ngram_size</strong> (<code>int</code>, <em>optional</em>, defaults to 0) — Value that will be used by — default in the <code>generate</code> method of the model for <code>encoder_no_repeat_ngram_size</code>. 
If set to int > 0, all ngrams of that size that occur in the <code>encoder_input_ids</code> cannot occur in the <code>decoder_input_ids</code>.</li> <li><strong>bad_words_ids</strong> (<code>List[int]</code>, <em>optional</em>) — List of token ids that are not allowed to be generated that will be used by default in the <code>generate</code> method of the model. In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer.encode(bad_word, add_prefix_space=True)</code>.</li> <li><strong>num_return_sequences</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — Number of independently computed returned sequences for each element in the batch that will be used by default in the <code>generate</code> method of the model.</li> <li><strong>output_scores</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — Whether the model should return the logits when used for generation.</li> <li><strong>return_dict_in_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — Whether the model should return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput" >ModelOutput</a> instead of a <code>torch.LongTensor</code>.</li> <li><strong>forced_bos_token_id</strong> (<code>int</code>, <em>optional</em>) — The id of the token to force as the first generated token after the <code>decoder_start_token_id</code>. 
Useful for multilingual models like <a href="../model_doc/mbart">mBART</a> where the first generated token needs to be the target language token.</li> <li><strong>forced_eos_token_id</strong> (<code>int</code>, <em>optional</em>) — The id of the token to force as the last generated token when <code>max_length</code> is reached.</li> <li><strong>remove_invalid_values</strong> (<code>bool</code>, <em>optional</em>) — Whether to remove possible <em>nan</em> and <em>inf</em> outputs of the model to prevent the generation method to crash. Note that using <code>remove_invalid_values</code> can slow down generation.</li> </ul> <!-- HTML_TAG_END --></p><p class="flex items-center font-semibold">Parameters for fine-tuning tasks <span class="flex-auto border-t-2 ml-3"></span></p> <p><!-- HTML_TAG_START --> <ul> <li><strong>architectures</strong> (<code>List[str]</code>, <em>optional</em>) — Model architectures that can be used with the model pretrained weights.</li> <li><strong>finetuning_task</strong> (<code>str</code>, <em>optional</em>) — Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.</li> <li><strong>id2label</strong> (<code>Dict[int, str]</code>, <em>optional</em>) — A map from index (for instance prediction index, or target index) to label.</li> <li><strong>label2id</strong> (<code>Dict[str, int]</code>, <em>optional</em>) — A map from label to index for the model.</li> <li><strong>num_labels</strong> (<code>int</code>, <em>optional</em>) — Number of labels to use in the last layer added to the model, typically for a classification task.</li> <li><strong>task_specific_params</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) — Additional keyword arguments to store for the current task.</li> <li><strong>problem_type</strong> (<code>str</code>, <em>optional</em>) — Problem type for <code>XxxForSequenceClassification</code> models. 
Can be one of <code>"regression"</code>, <code>"single_label_classification"</code> or <code>"multi_label_classification"</code>.</li> </ul> <!-- HTML_TAG_END --></p><p class="flex items-center font-semibold">Parameters linked to the tokenizer <span class="flex-auto border-t-2 ml-3"></span></p> <p><!-- HTML_TAG_START --> <ul> <li><strong>tokenizer_class</strong> (<code>str</code>, <em>optional</em>) — The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the model by default).</li> <li><strong>prefix</strong> (<code>str</code>, <em>optional</em>) — A specific prompt that should be added at the beginning of each text before calling the model.</li> <li><strong>bos_token_id</strong> (<code>int</code>, <em>optional</em>) — The id of the <em>beginning-of-stream</em> token.</li> <li><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) — The id of the <em>padding</em> token.</li> <li><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) — The id of the <em>end-of-stream</em> token.</li> <li><strong>decoder_start_token_id</strong> (<code>int</code>, <em>optional</em>) — If an encoder-decoder model starts decoding with a different token than <em>bos</em>, the id of that token.</li> <li><strong>sep_token_id</strong> (<code>int</code>, <em>optional</em>) — The id of the <em>separation</em> token.</li> </ul> <!-- HTML_TAG_END --></p><p class="flex items-center font-semibold">PyTorch specific parameters <span class="flex-auto border-t-2 ml-3"></span></p> <p><!-- HTML_TAG_START --> <ul> <li> <p><strong>torchscript</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — Whether or not the model should be used with Torchscript.</p> </li> <li> <p><strong>tie_word_embeddings</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — Whether the model’s input and output word embeddings should be tied. 
Note that this is only relevant if the model has a output word embedding layer.</p> </li> <li> <p><strong>torch_dtype</strong> (<code>str</code>, <em>optional</em>) — The <code>dtype</code> of the weights. This attribute can be used to initialize the model to a non-default <code>dtype</code> (which is normally <code>float32</code>) and thus allow for optimal storage allocation. For example, if the saved model is <code>float16</code>, ideally we want to load it back using the minimal amount of memory needed to load <code>float16</code> weights. Since the config object is stored in plain text, this attribute contains just the floating type string without the <code>torch.</code> prefix. For example, for <code>torch.float16</code> `<code>torch_dtype</code> is the <code>"float16"</code> string.</p> <p>This attribute is currently not being used during model loading time, but this may change in the future versions. But we can already start preparing for the future by saving the dtype with save_pretrained.</p> </li> </ul> <!-- HTML_TAG_END --></p><p class="flex items-center font-semibold">TensorFlow specific parameters <span class="flex-auto border-t-2 ml-3"></span></p> <p><!-- HTML_TAG_START --> <ul> <li><strong>use_bfloat16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).</li> </ul> <!-- HTML_TAG_END --></p> </div></div> <p>Base class for all configuration classes. Handles a few parameters common to all models’ configurations as well as methods for loading/downloading/saving configurations.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>A configuration file can be loaded and saved to disk. 
Loading the configuration file and using this file to initialize a model does <strong>not</strong> load the model weights. It only affects the model’s configuration.</p></div> <p>Class attributes (overridden by derived classes):</p> <ul><li><strong>model_type</strong> (<code>str</code>) — An identifier for the model type, serialized into the JSON file, and used to recreate the correct object in <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoConfig">AutoConfig</a>.</li> <li><strong>is_composition</strong> (<code>bool</code>) — Whether the config class is composed of multiple sub-configs. In this case the config has to be initialized from two or more configs of type <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> like: <a href="/docs/transformers/pr_16143/en/model_doc/encoder-decoder#transformers.EncoderDecoderConfig">EncoderDecoderConfig</a> or <a href="/docs/transformers/pr_16143/en/model_doc/rag#transformers.RagConfig">~RagConfig</a>.</li> <li><strong>keys_to_ignore_at_inference</strong> (<code>List[str]</code>) — A list of keys to ignore by default when looking at dictionary outputs of the model during inference.</li> <li><strong>attribute_map</strong> (<code>Dict[str, str]</code>) — A dict that maps model specific attribute names to the standardized naming of attributes.</li></ul> <p>Common attributes (present in all subclasses):</p> <ul><li><strong>vocab_size</strong> (<code>int</code>) — The number of tokens in the vocabulary, which is also the first dimension of the embeddings matrix (this attribute may be missing for models that don’t have a text modality like ViT).</li> <li><strong>hidden_size</strong> (<code>int</code>) — The hidden size of the model.</li> <li><strong>num_attention_heads</strong> (<code>int</code>) — The number of attention heads used in the multi-head attention layers of the model.</li> <li><strong>num_hidden_layers</strong> (<code>int</code>) 
— The number of blocks in the model.</li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.PushToHubMixin.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.PushToHubMixin.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.PushToHubMixin.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_path_or_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_url<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">organization<span class="opacity-60">: 
typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your config in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_url" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_url"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_url</strong> (<code>str</code>, 
<em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. 
This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add config&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.organization" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.organization"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your config (you must be a member of this organization).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.file_utils.PushToHubMixin.push_to_hub.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The url of the commit of your config in the given repository.</p> <!-- HTML_TAG_END --></p></div></div> <p>Upload the configuration file to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the config to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> config.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the config to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> config.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the config to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> config.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> config.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center 
text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.dict_torch_dtype_to_str"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>dict_torch_dtype_to_str</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.dict_torch_dtype_to_str" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.dict_torch_dtype_to_str"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L851" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">d<span class="opacity-60">: typing.Dict[str, typing.Any]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Checks whether the passed dictionary and its nested dicts have a <em>torch_dtype</em> key and if it’s not None, converts torch.dtype to a string of just the type. 
For example, <code>torch.float32</code> get converted into <em>“float32”</em> string, which can then be stored in the json format.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.from_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.from_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.from_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L653" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config_dict<span class="opacity-60">: typing.Dict[str, typing.Any]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 
my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_dict.config_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_dict.config_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config_dict</strong> (<code>Dict[str, Any]</code>) &#x2014; Dictionary that will be used to instantiate the configuration object. 
Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.get_config_dict">get_config_dict()</a> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_dict.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_dict.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (<code>Dict[str, Any]</code>) &#x2014; Additional parameters from which to initialize the configuration object.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PretrainedConfig.from_dict.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> <!-- HTML_TAG_END --> <span class="flex-auto 
border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The configuration object instantiated from those parameters.</p> <!-- HTML_TAG_END --></p></div></div> <p>Instantiates a <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> from a Python dictionary of parameters.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.from_json_file"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_json_file</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.PretrainedConfig.from_json_file" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.from_json_file"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L691" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">json_file<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 
border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_json_file.json_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_json_file.json_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>json_file</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Path to the JSON file containing the parameters.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PretrainedConfig.from_json_file.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The configuration object instantiated from that JSON file.</p> <!-- 
HTML_TAG_END --></p></div></div> <p>Instantiates a <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> from the path to a JSON file of parameters.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.from_pretrained"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L446" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span 
class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained model configuration hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a configuration file saved using the <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved configuration JSON <em>file</em>, e.g., <code>./my_model_directory/configuration.json</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.cache_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.cache_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cache_dir</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded pretrained model configuration 
should be cached if the standard cache should not be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.force_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.force_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force to (re-)download the configuration files and override the cached versions if they exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.resume_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.resume_download"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.proxies" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.proxies"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}.</code> The proxies are used on each request.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. 
If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.revision(str," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.revision(str,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>revision(<code>str</code>,</strong> <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.return_unused_kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.return_unused_kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_unused_kwargs</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>False</code>, then this function returns just the final configuration object.</p> <p>If <code>True</code>, then this functions returns a <code>Tuple(config, unused_kwargs)</code> where <em>unused_kwargs</em> is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the part of <code>kwargs</code> which has not been used to update <code>config</code> and is otherwise 
ignored.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.from_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.from_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. 
Behavior concerning key/value pairs whose keys are <em>not</em> configuration attributes is controlled by the <code>return_unused_kwargs</code> keyword parameter.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PretrainedConfig.from_pretrained.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig" >PretrainedConfig</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The configuration object instantiated from this pretrained model.</p> <!-- HTML_TAG_END --></p></div></div> <p>Instantiate a <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> (or a derived class) from a pretrained model configuration.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Passing <code>use_auth_token=True</code> is required when you want to use a private model.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path 
d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># We can&#x27;t instantiate directly the base class *PretrainedConfig* so let&#x27;s show the examples on a</span> <span class="hljs-comment"># derived class: BertConfig</span> config = BertConfig.from_pretrained( <span class="hljs-string">&quot;bert-base-uncased&quot;</span> ) <span class="hljs-comment"># Download configuration from huggingface.co and cache.</span> config = BertConfig.from_pretrained( <span class="hljs-string">&quot;./test/saved_model/&quot;</span> ) <span class="hljs-comment"># E.g. 
config (or model) was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*</span> config = BertConfig.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/my_configuration.json&quot;</span>) config = BertConfig.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>, foo=<span class="hljs-literal">False</span>) <span class="hljs-keyword">assert</span> config.output_attentions == <span class="hljs-literal">True</span> config, unused_kwargs = BertConfig.from_pretrained( <span class="hljs-string">&quot;bert-base-uncased&quot;</span>, output_attentions=<span class="hljs-literal">True</span>, foo=<span class="hljs-literal">False</span>, return_unused_kwargs=<span class="hljs-literal">True</span> ) <span class="hljs-keyword">assert</span> config.output_attentions == <span class="hljs-literal">True</span> <span class="hljs-keyword">assert</span> unused_kwargs == {<span class="hljs-string">&quot;foo&quot;</span>: <span class="hljs-literal">False</span>}<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.get_config_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" 
clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_config_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.get_config_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.get_config_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L529" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Tuple[Dict, Dict]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.get_config_dict.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.get_config_dict.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PretrainedConfig.get_config_dict.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Tuple[Dict, Dict]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The dictionary(ies) that will be used to instantiate the configuration object.</p> <!-- HTML_TAG_END --></p></div></div> <p>From a <code>pretrained_model_name_or_path</code>, resolve to a dictionary of parameters, to be used for instantiating a <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a> using <code>from_dict</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.register_for_auto_class"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 
18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>register_for_auto_class</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.register_for_auto_class" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.register_for_auto_class"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L863" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_class<span class="opacity-60"> = &#39;AutoConfig&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.register_for_auto_class.auto_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.register_for_auto_class.auto_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoConfig&quot;</code>) &#x2014; The auto class to register this new 
configuration with.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Register this class with a given auto class. This should only be used for custom configurations as the ones in the library are already mapped with <code>AutoConfig</code>.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This API is experimental and may have some slight breaking changes in the next releases.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 
9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L400" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the configuration JSON file will be saved (will be created if it does not exist).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.save_pretrained.push_to_hub" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. 
Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save a configuration object to the directory <code>save_directory</code>, so that it can be re-loaded using the <a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig.from_pretrained">from_pretrained()</a> class method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.to_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 
9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.to_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.to_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L751" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, Any]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PretrainedConfig.to_dict.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, 
Any]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Dictionary of all the attributes that make up this configuration instance.</p> <!-- HTML_TAG_END --></p></div></div> <p>Serializes this instance to a Python dictionary.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.to_diff_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_diff_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.to_diff_dict" 
class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.to_diff_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L719" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, Any]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PretrainedConfig.to_diff_dict.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, Any]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Dictionary of all the attributes that make up this configuration instance,</p> <!-- 
HTML_TAG_END --></p></div></div> <p>Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.to_json_file"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_json_file</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.to_json_file" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.to_json_file"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L789" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">json_file_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_diff<span class="opacity-60">: bool = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.to_json_file.json_file_path" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.to_json_file.json_file_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>json_file_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Path to the JSON file in which this configuration instance&#x2019;s parameters will be saved.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.to_json_file.use_diff" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.to_json_file.use_diff"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_diff</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, only the difference between the config instance and the default <code>PretrainedConfig()</code> is serialized to JSON file.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save this instance to a JSON file.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.to_json_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 
21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_json_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.to_json_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.to_json_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L771" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_diff<span class="opacity-60">: bool = True</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- 
HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.to_json_string.use_diff" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.to_json_string.use_diff"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_diff</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If set to <code>True</code>, only the difference between the config instance and the default <code>PretrainedConfig()</code> is serialized to JSON string.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PretrainedConfig.to_json_string.returns"><p class="text-base">Returns</p> <!-- 
HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>String containing all the attributes that make up this configuration instance in JSON format.</p> <!-- HTML_TAG_END --></p></div></div> <p>Serializes this instance to a JSON string.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.update"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>update</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.PretrainedConfig.update" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.update"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L803" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config_dict<span class="opacity-60">: typing.Dict[str, typing.Any]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.update.config_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.update.config_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config_dict</strong> (<code>Dict[str, Any]</code>) &#x2014; Dictionary of attributes that should be updated for this class.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Updates attributes of this class with attributes from <code>config_dict</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PretrainedConfig.update_from_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 
26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>update_from_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.PretrainedConfig.update_from_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PretrainedConfig.update_from_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/configuration_utils.py#L813" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">update_str<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PretrainedConfig.update_from_string.update_str" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PretrainedConfig.update_from_string.update_str"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>update_str</strong> (<code>str</code>) &#x2014; String with attributes that should be updated for this class.<!-- HTML_TAG_END --> 
</span></span> </li></ul> </div></div> <p>Updates attributes of this class with attributes from <code>update_str</code>.</p> <p>The expected format is ints, floats and strings as is, and for booleans use <code>true</code> or <code>false</code>. For example: “n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index”</p> <p>The keys to change have to already exist in the config object.</p></div></div> <script type="module" data-hydrate="vwjpk8"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="vwjpk8"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/configuration.mdx-5be476eb.js") ], params: {} } }); </script>
430
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/processors.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;processors&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.ProcessorMixin&quot;,&quot;title&quot;:&quot;Multi-modal processors&quot;},{&quot;local&quot;:&quot;transformers.DataProcessor&quot;,&quot;title&quot;:&quot;Deprecated processors&quot;},{&quot;local&quot;:&quot;transformers.glue_convert_examples_to_features&quot;,&quot;title&quot;:&quot;GLUE&quot;},{&quot;local&quot;:&quot;xnli&quot;,&quot;title&quot;:&quot;XNLI&quot;},{&quot;local&quot;:&quot;squad&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.data.processors.squad.SquadProcessor&quot;,&quot;title&quot;:&quot;Processors&quot;},{&quot;local&quot;:&quot;example-usage&quot;,&quot;title&quot;:&quot;Example usage&quot;}],&quot;title&quot;:&quot;SQuAD&quot;}],&quot;title&quot;:&quot;Processors&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/processors.mdx-9cb76e12.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link 
rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="processors" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#processors"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Processors </span></h1> <p>Processors can mean two different things in the Transformers library:</p> <ul><li>the objects that pre-process inputs for multi-modal models such as <a href="../model_doc/wav2vec2">Wav2Vec2</a> (speech and text) or <a href="../model_doc/clip">CLIP</a> (text and vision)</li> <li>deprecated objects that were used in older versions of the library to preprocess data for GLUE or SQUAD.</li></ul> <h2 class="relative group"><a id="transformers.ProcessorMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 
256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Multi-modal processors </span></h2> <p>Any multi-modal model will require an object to encode or decode the data that groups several modalities (among text, vision and audio). This is handled by objects called processors, which group tokenizers (for the text modality) and feature extractors (for vision and audio).</p> <p>Those processors inherit from the following base class that implements the saving and loading functionality:</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProcessorMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 
12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ProcessorMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ProcessorMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProcessorMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L44" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span 
class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This is a mixin used to provide saving/loading functionality for all processor classes.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProcessorMixin.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProcessorMixin.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProcessorMixin.from_pretrained"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L157" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; This can be either:</p> <ul> <li>a string, the <em>model id</em> of a pretrained feature_extractor hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>a path to a <em>directory</em> containing a feature extractor file saved using the <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>a path or url to a saved feature extractor JSON <em>file</em>, e.g., <code>./my_model_directory/preprocessor_config.json</code>. 
**kwargs &#x2014; Additional keyword arguments passed along to both <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> and <code>from_pretrained</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate a processor associated with a pretrained model.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This class method is simply calling the feature extractor <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.from_pretrained">from_pretrained()</a> and the tokenizer <code>from_pretrained</code> methods. Please refer to the docstrings of the methods above for more information.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.PushToHubMixin.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 
11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.PushToHubMixin.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.PushToHubMixin.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">repo_path_or_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_url<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">organization<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 
border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your processor in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). 
If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_url" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_url"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. 
If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. 
This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add processor&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.organization" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.organization"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your processor (you must be a member of this organization).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.file_utils.PushToHubMixin.push_to_hub.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The url of the commit of your processor in the given repository.</p> <!-- HTML_TAG_END --></p></div></div> <p>Upload the processor files to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoProcessor processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the processor to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> processor.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the processor to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> processor.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the processor to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> processor.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> processor.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group 
flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProcessorMixin.register_for_auto_class"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>register_for_auto_class</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProcessorMixin.register_for_auto_class" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProcessorMixin.register_for_auto_class"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L190" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_class<span class="opacity-60"> = &#39;AutoProcessor&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.register_for_auto_class.auto_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.register_for_auto_class.auto_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoProcessor&quot;</code>) &#x2014; The auto class to register this new feature extractor with.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Register this class with a given auto class. This should only be used for custom feature extractors as the ones in the library are already mapped with <code>AutoProcessor</code>.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This API is experimental and may have some slight breaking changes in the next releases.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ProcessorMixin.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 
6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.ProcessorMixin.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ProcessorMixin.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/processing_utils.py#L95" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ProcessorMixin.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ProcessorMixin.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your processor to the Hugging Face model hub after saving 
it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be a local clone of the repo you are pushing to if it&#x2019;s an existing folder. Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div> <p>kwargs &#x2014; Additional key word arguments passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.file_utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Saves the attributes of this processor (feature extractor, tokenizer…) in the specified directory so that it can be reloaded using the <a href="/docs/transformers/pr_16143/en/main_classes/processors#transformers.ProcessorMixin.from_pretrained">from_pretrained()</a> method.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This class method is simply calling <a href="/docs/transformers/pr_16143/en/main_classes/feature_extractor#transformers.FeatureExtractionMixin.save_pretrained">save_pretrained()</a> and <code>save_pretrained</code>. 
Please refer to the docstrings of the methods above for more information.</p></div></div></div> <h2 class="relative group"><a id="transformers.DataProcessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DataProcessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Deprecated processors </span></h2> <p>All processors follow the same architecture which is that of the <a href="/docs/transformers/pr_16143/en/main_classes/processors#transformers.DataProcessor">DataProcessor</a>. The processor returns a list of <a href="/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample">InputExample</a>. 
These <a href="/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample">InputExample</a> can be converted to <a href="/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputFeatures">InputFeatures</a> in order to be fed to the model.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DataProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DataProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L81" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base class for data converters for sequence classification data sets.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataProcessor.get_dev_examples"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" 
clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_dev_examples</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataProcessor.get_dev_examples" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataProcessor.get_dev_examples"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L98" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_dir<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Gets a collection of <a href="/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample">InputExample</a> for the dev set.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataProcessor.get_example_from_tensor_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>get_example_from_tensor_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataProcessor.get_example_from_tensor_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataProcessor.get_example_from_tensor_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L84" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tensor_dict<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Gets an example from a dict with tensorflow tensors.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataProcessor.get_labels"><!-- HTML_TAG_START --><h4 
class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_labels</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataProcessor.get_labels" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataProcessor.get_labels"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L106" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Gets the list of labels for this data set.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataProcessor.get_test_examples"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 
11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_test_examples</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataProcessor.get_test_examples" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataProcessor.get_test_examples"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L102" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_dir<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details 
"> </div></div> <p>Gets a collection of <a href="/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample">InputExample</a> for the test set.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataProcessor.get_train_examples"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_train_examples</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataProcessor.get_train_examples" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataProcessor.get_train_examples"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L94" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_dir<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Gets a collection of <a href="/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample">InputExample</a> for the train set.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DataProcessor.tfds_map"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 
inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tfds_map</span></h4><!-- HTML_TAG_END --> <a id="transformers.DataProcessor.tfds_map" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DataProcessor.tfds_map"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L110" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">example<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts examples to the correct format.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.InputExample"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 
3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">InputExample</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.InputExample" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.InputExample"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">guid<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_a<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">text_b<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A single training/test example for simple sequence classification.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.InputExample.to_json_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_json_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.InputExample.to_json_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.InputExample.to_json_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Serializes this instance to a JSON string.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.InputFeatures"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 
dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">InputFeatures</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.InputFeatures" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.InputFeatures"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L56" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attention_mask<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_type_ids<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label<span class="opacity-60">: typing.Union[int, float, NoneType] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A single set of features of data. 
Property names are the same names as the corresponding inputs to a model.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.InputFeatures.to_json_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_json_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.InputFeatures.to_json_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.InputFeatures.to_json_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/utils.py#L76" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Serializes this instance to a JSON string.</p></div></div> <h2 class="relative group"><a id="transformers.glue_convert_examples_to_features" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.glue_convert_examples_to_features"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>GLUE </span></h2> <p><a href="https://gluebenchmark.com/" rel="nofollow">General Language Understanding Evaluation (GLUE)</a> is a benchmark that evaluates the performance of models across a diverse set of existing NLU tasks. It was released together with the paper <a href="https://openreview.net/pdf?id=rJ4km2R5t7" rel="nofollow">GLUE: A multi-task benchmark and analysis platform for natural language understanding</a></p> <p>This library hosts a total of 10 processors for the following tasks: MRPC, MNLI, MNLI (mismatched), CoLA, SST2, STSB, QQP, QNLI, RTE and WNLI.</p> <p>Those processors are:</p> <ul><li><code>MrpcProcessor</code></li> <li><code>MnliProcessor</code></li> <li><code>MnliMismatchedProcessor</code></li> <li><code>Sst2Processor</code></li> <li><code>StsbProcessor</code></li> <li><code>QqpProcessor</code></li> <li><code>QnliProcessor</code></li> <li><code>RteProcessor</code></li> <li><code>WnliProcessor</code></li></ul> <p>Additionally, the following method can be used to load values from a data file and convert them to a list of <a href="/docs/transformers/pr_16143/en/main_classes/processors#transformers.InputExample">InputExample</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.glue_convert_examples_to_features"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 
5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.glue_convert_examples_to_features</span></h4><!-- HTML_TAG_END --> <a id="transformers.glue_convert_examples_to_features" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.glue_convert_examples_to_features"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/glue.py#L42" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">examples<span class="opacity-60">: typing.Union[typing.List[transformers.data.processors.utils.InputExample], ForwardRef(&#39;tf.data.Dataset&#39;)]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizer</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_list<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_mode<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Loads a data file into a list of <code>InputFeatures</code></p></div> <h2 class="relative group"><a id="xnli" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#xnli"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XNLI </span></h2> <p><a href="https://www.nyu.edu/projects/bowman/xnli/" rel="nofollow">The Cross-Lingual NLI Corpus (XNLI)</a> is a benchmark that evaluates the quality of cross-lingual text representations. 
XNLI is crowd-sourced dataset based on <a href="http://www.nyu.edu/projects/bowman/multinli/" rel="nofollow"><em>MultiNLI</em></a>: pairs of text are labeled with textual entailment annotations for 15 different languages (including both high-resource language such as English and low-resource languages such as Swahili).</p> <p>It was released together with the paper <a href="https://arxiv.org/abs/1809.05053" rel="nofollow">XNLI: Evaluating Cross-lingual Sentence Representations</a></p> <p>This library hosts the processor to load the XNLI data:</p> <ul><li><code>XnliProcessor</code></li></ul> <p>Please note that since the gold labels are available on the test set, evaluation is performed on the test set.</p> <p>An example using these processors is given in the <a href="https://github.com/huggingface/transformers/tree/master/examples/legacy/text-classification/run_xnli.py" rel="nofollow">run_xnli.py</a> script.</p> <h2 class="relative group"><a id="squad" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#squad"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SQuAD </span></h2> <p><a 
href="https://rajpurkar.github.io/SQuAD-explorer//" rel="nofollow">The Stanford Question Answering Dataset (SQuAD)</a> is a benchmark that evaluates the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version (v1.1) was released together with the paper <a href="https://arxiv.org/abs/1606.05250" rel="nofollow">SQuAD: 100,000+ Questions for Machine Comprehension of Text</a>. The second version (v2.0) was released alongside the paper <a href="https://arxiv.org/abs/1806.03822" rel="nofollow">Know What You Don’t Know: Unanswerable Questions for SQuAD</a>.</p> <p>This library hosts a processor for each of the two versions:</p> <h3 class="relative group"><a id="transformers.data.processors.squad.SquadProcessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.data.processors.squad.SquadProcessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Processors </span></h3> <p>Those processors are:</p> <ul><li><code>SquadV1Processor</code></li> <li><code>SquadV2Processor</code></li></ul> <p>They both inherit from the abstract class <code>SquadProcessor</code></p> <div 
class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.data.processors.squad.SquadProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.data.processors.squad.</span><span class="font-semibold">SquadProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.data.processors.squad.SquadProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.data.processors.squad.SquadProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/squad.py#L543" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Processor for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.data.processors.squad.SquadProcessor.get_dev_examples"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 
12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_dev_examples</span></h4><!-- HTML_TAG_END --> <a id="transformers.data.processors.squad.SquadProcessor.get_dev_examples" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.data.processors.squad.SquadProcessor.get_dev_examples"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/squad.py#L631" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span 
class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_dir<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Returns the evaluation example from the data directory.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.data.processors.squad.SquadProcessor.get_examples_from_dataset"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_examples_from_dataset</span></h4><!-- HTML_TAG_END --> <a id="transformers.data.processors.squad.SquadProcessor.get_examples_from_dataset" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.data.processors.squad.SquadProcessor.get_examples_from_dataset"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/squad.py#L576" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataset<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">evaluate<span class="opacity-60"> = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative 
docstring-details "> </div></div> <p>Creates a list of <code>SquadExample</code>using a TFDS dataset.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow_datasets <span class="hljs-keyword">as</span> tfds <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = tfds.load(<span class="hljs-string">&quot;squad&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>training_examples = get_examples_from_dataset(dataset, evaluate=<span class="hljs-literal">False</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>evaluation_examples = get_examples_from_dataset(dataset, evaluate=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center 
text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.data.processors.squad.SquadProcessor.get_train_examples"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_train_examples</span></h4><!-- HTML_TAG_END --> <a id="transformers.data.processors.squad.SquadProcessor.get_train_examples" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.data.processors.squad.SquadProcessor.get_train_examples"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 
0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/squad.py#L609" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_dir<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Returns the training examples from the data directory.</p></div></div> <p>Additionally, the following method can be used to convert SQuAD examples into <code>SquadFeatures</code> that can be used as model inputs.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.squad_convert_examples_to_features"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 
font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.squad_convert_examples_to_features</span></h4><!-- HTML_TAG_END --> <a id="transformers.squad_convert_examples_to_features" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.squad_convert_examples_to_features"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/data/processors/squad.py#L318" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">examples<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_seq_length<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">doc_stride<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_query_length<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_training<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding_strategy<span class="opacity-60"> = &#39;max_length&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">return_dataset<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">threads<span class="opacity-60"> = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tqdm_enabled<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Converts a list of examples into a list of features that can be directly given as input to a model. It is model-dependant and takes advantage of many of the tokenizer’s features to create the model’s inputs.</p> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> 
<pre><!-- HTML_TAG_START -->processor = SquadV2Processor() examples = processor.get_dev_examples(data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=<span class="hljs-keyword">not</span> evaluate, )<!-- HTML_TAG_END --></pre></div></div> <p>These processors as well as the aforementionned method can be used with files containing the data as well as with the <em>tensorflow_datasets</em> package. Examples are given below.</p> <h3 class="relative group"><a id="example-usage" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#example-usage"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Example usage </span></h3> <p>Here is an example using the processors as well as the conversion method using data files:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code 
excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># Loading a V2 processor</span> processor = SquadV2Processor() examples = processor.get_dev_examples(squad_v2_data_dir) <span class="hljs-comment"># Loading a V1 processor</span> processor = SquadV1Processor() examples = processor.get_dev_examples(squad_v1_data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=<span class="hljs-keyword">not</span> evaluate, )<!-- HTML_TAG_END --></pre></div> <p>Using <em>tensorflow_datasets</em> is as easy as using a data file:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># tensorflow_datasets only handle Squad V1.</span> tfds_examples = tfds.load(<span class="hljs-string">&quot;squad&quot;</span>) examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=<span class="hljs-keyword">not</span> evaluate, )<!-- HTML_TAG_END --></pre></div> <p>Another example using these processors is given in the <a href="https://github.com/huggingface/transformers/tree/master/examples/legacy/question-answering/run_squad.py" rel="nofollow">run_squad.py</a> script.</p> <script type="module" data-hydrate="p6ors0"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="p6ors0"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ 
import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/processors.mdx-9cb76e12.js") ], params: {} } }); </script>
431
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/trainer.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;trainer&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.Trainer&quot;,&quot;title&quot;:&quot;Trainer&quot;},{&quot;local&quot;:&quot;transformers.Seq2SeqTrainer&quot;,&quot;title&quot;:&quot;Seq2SeqTrainer&quot;},{&quot;local&quot;:&quot;transformers.TrainingArguments&quot;,&quot;title&quot;:&quot;TrainingArguments&quot;},{&quot;local&quot;:&quot;transformers.Seq2SeqTrainingArguments&quot;,&quot;title&quot;:&quot;Seq2SeqTrainingArguments&quot;},{&quot;local&quot;:&quot;checkpoints&quot;,&quot;title&quot;:&quot;Checkpoints&quot;},{&quot;local&quot;:&quot;logging&quot;,&quot;title&quot;:&quot;Logging&quot;},{&quot;local&quot;:&quot;randomness&quot;,&quot;title&quot;:&quot;Randomness&quot;},{&quot;local&quot;:&quot;specific-gpus-selection&quot;,&quot;title&quot;:&quot;Specific GPUs Selection&quot;},{&quot;local&quot;:&quot;trainer-integrations&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;cuda-extension-installation-notes&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;possible-problem-1&quot;,&quot;title&quot;:&quot;Possible problem #1&quot;},{&quot;local&quot;:&quot;possible-problem-2&quot;,&quot;title&quot;:&quot;Possible problem #2&quot;},{&quot;local&quot;:&quot;possible-problem-3&quot;,&quot;title&quot;:&quot;Possible problem #3&quot;}],&quot;title&quot;:&quot;CUDA Extension Installation Notes&quot;},{&quot;local&quot;:&quot;fairscale&quot;,&quot;title&quot;:&quot;FairScale&quot;}],&quot;title&quot;:&quot;Trainer Integrations&quot;}],&quot;title&quot;:&quot;Trainer&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> 
<link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/trainer.mdx-a51a0aac.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Trainer </span></h1> <p>The <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> class provides an API for feature-complete training in PyTorch for 
most standard use cases. It’s used in most of the <a href="../examples">example scripts</a>.</p> <p>Before instantiating your <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, create a <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> to access all the points of customization during training.</p> <p>The API supports distributed training on multiple GPUs/TPUs, mixed precision through <a href="https://github.com/NVIDIA/apex" rel="nofollow">NVIDIA Apex</a> and Native AMP for PyTorch.</p> <p>The <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> contains the basic training loop which supports the above features. To inject custom behavior you can subclass them and override the following methods:</p> <ul><li><strong>get_train_dataloader</strong> — Creates the training DataLoader.</li> <li><strong>get_eval_dataloader</strong> — Creates the evaluation DataLoader.</li> <li><strong>get_test_dataloader</strong> — Creates the test DataLoader.</li> <li><strong>log</strong> — Logs information on the various objects watching training.</li> <li><strong>create_optimizer_and_scheduler</strong> — Sets up the optimizer and learning rate scheduler if they were not passed at init. 
Note, that you can also subclass or override the <code>create_optimizer</code> and <code>create_scheduler</code> methods separately.</li> <li><strong>create_optimizer</strong> — Sets up the optimizer if it wasn’t passed at init.</li> <li><strong>create_scheduler</strong> — Sets up the learning rate scheduler if it wasn’t passed at init.</li> <li><strong>compute_loss</strong> - Computes the loss on a batch of training inputs.</li> <li><strong>training_step</strong> — Performs a training step.</li> <li><strong>prediction_step</strong> — Performs an evaluation/test step.</li> <li><strong>evaluate</strong> — Runs an evaluation loop and returns metrics.</li> <li><strong>predict</strong> — Returns predictions (with metrics if labels are available) on a test set.</li></ul> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>The <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> class is optimized for 🤗 Transformers models and can have surprising behaviors when you use it on other models. 
When using it on your own model, make sure:</p> <ul><li>your model always return tuples or subclasses of <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a>.</li> <li>your model can compute the loss if a <code>labels</code> argument is provided and that loss is returned as the first element of the tuple (if your model returns tuples)</li> <li>your model can accept multiple label arguments (use the <code>label_names</code> in your <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> to indicate their name to the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>) but none of them should be named <code>&quot;label&quot;</code>.</li></ul></div> <p>Here is an example of how to customize <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> to use a weighted loss (useful when you have an unbalanced training set):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div 
class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> torch <span class="hljs-keyword">import</span> nn <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Trainer <span class="hljs-keyword">class</span> <span class="hljs-title class_">CustomTrainer</span>(<span class="hljs-title class_ inherited__">Trainer</span>): <span class="hljs-keyword">def</span> <span class="hljs-title function_">compute_loss</span>(<span class="hljs-params">self, model, inputs, return_outputs=<span class="hljs-literal">False</span></span>): labels = inputs.get(<span class="hljs-string">&quot;labels&quot;</span>) <span class="hljs-comment"># forward pass</span> outputs = model(**inputs) logits = outputs.get(<span class="hljs-string">&quot;logits&quot;</span>) <span class="hljs-comment"># compute custom loss (suppose one has 3 labels with different weights)</span> loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([<span class="hljs-number">1.0</span>, <span class="hljs-number">2.0</span>, <span class="hljs-number">3.0</span>])) loss = loss_fct(logits.view(-<span class="hljs-number">1</span>, self.model.config.num_labels), labels.view(-<span class="hljs-number">1</span>)) <span class="hljs-keyword">return</span> (loss, outputs) <span class="hljs-keyword">if</span> return_outputs <span class="hljs-keyword">else</span> loss<!-- HTML_TAG_END --></pre></div> <p>Another way to customize the training loop behavior for the PyTorch <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> is to use <a href="callback">callbacks</a> that can inspect the training loop state (for progress reporting, logging on TensorBoard or other ML platforms…) and take decisions (like early stopping).</p> <h2 
class="relative group"><a id="transformers.Trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Trainer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 
1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Trainer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Trainer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L199" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Union[transformers.modeling_utils.PreTrainedModel, torch.nn.modules.module.Module] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_collator<span class="opacity-60">: typing.Optional[DataCollator] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train_dataset<span class="opacity-60">: typing.Optional[torch.utils.data.dataset.Dataset] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_dataset<span class="opacity-60">: typing.Optional[torch.utils.data.dataset.Dataset] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: typing.Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_init<span class="opacity-60">: typing.Callable[[], transformers.modeling_utils.PreTrainedModel] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">compute_metrics<span class="opacity-60">: typing.Union[typing.Callable[[transformers.trainer_utils.EvalPrediction], typing.Dict], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">callbacks<span class="opacity-60">: typing.Optional[typing.List[transformers.trainer_callback.TrainerCallback]] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizers<span class="opacity-60">: typing.Tuple[torch.optim.optimizer.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">preprocess_logits_for_metrics<span class="opacity-60">: typing.Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<a 
href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> or <code>torch.nn.Module</code>, <em>optional</em>) &#x2014; The model to train, evaluate or use for predictions. If not provided, a <code>model_init</code> must be passed.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p><a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> is optimized to work with the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> provided by the library. You can still use your own models defined as <code>torch.nn.Module</code> as long as they work the same way as the &#x1F917; Transformers models.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>, <em>optional</em>) &#x2014; The arguments to tweak for training. Will default to a basic instance of <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> with the <code>output_dir</code> set to a directory named <em>tmp_trainer</em> in the current directory if not provided.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.data_collator" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.data_collator"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data_collator</strong> (<code>DataCollator</code>, <em>optional</em>) &#x2014; The function to use to form a batch from a list of elements of <code>train_dataset</code> or <code>eval_dataset</code>. 
Will default to <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.default_data_collator">default_data_collator()</a> if no <code>tokenizer</code> is provided, an instance of <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorWithPadding">DataCollatorWithPadding</a> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.train_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.train_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>train_dataset</strong> (<code>torch.utils.data.Dataset</code> or <code>torch.utils.data.IterableDataset</code>, <em>optional</em>) &#x2014; The dataset to use for training. 
If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed.</p> <p>Note that if it&#x2019;s a <code>torch.utils.data.IterableDataset</code> with some randomization and you are training in a distributed fashion, your iterable dataset should either use a internal attribute <code>generator</code> that is a <code>torch.Generator</code> for the randomization that must be identical on all processes (and the Trainer will manually set the seed of this <code>generator</code> at each epoch) or have a <code>set_epoch()</code> method that internally sets the seed of the RNGs used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.eval_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.eval_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_dataset</strong> (<code>torch.utils.data.Dataset</code>, <em>optional</em>) &#x2014; The dataset to use for evaluation. 
If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a>, <em>optional</em>) &#x2014; The tokenizer used to preprocess the data. 
If provided, will be used to automatically pad the inputs the maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine-tuned model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.model_init" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.model_init"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_init</strong> (<code>Callable[[], PreTrainedModel]</code>, <em>optional</em>) &#x2014; A function that instantiates the model to be used. 
If provided, each call to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> will start from a new instance of the model as given by this function.</p> <p>The function may have zero argument, or a single one containing the optuna/Ray Tune/SigOpt trial object, to be able to choose different architectures according to hyper parameters (such as layer count, sizes of inner layers, dropout probabilities etc).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.compute_metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.compute_metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>compute_metrics</strong> (<code>Callable[[EvalPrediction], Dict]</code>, <em>optional</em>) &#x2014; The function that will be used to compute metrics at evaluation. 
Must take a <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.EvalPrediction">EvalPrediction</a> and return a dictionary string to metric values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.callbacks" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.callbacks"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>callbacks</strong> (List of <a href="/docs/transformers/pr_16143/en/main_classes/callback#transformers.TrainerCallback">TrainerCallback</a>, <em>optional</em>) &#x2014; A list of callbacks to customize the training loop. 
Will add those to the list of default callbacks detailed in <a href="callback">here</a>.</p> <p>If you want to remove one of the default callbacks used, use the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.remove_callback">Trainer.remove_callback()</a> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.optimizers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.optimizers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizers</strong> (<code>Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]</code>, <em>optional</em>) &#x2014; A tuple containing the optimizer and the scheduler to use. 
Will default to an instance of <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> on your model and a scheduler given by <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.get_linear_schedule_with_warmup">get_linear_schedule_with_warmup()</a> controlled by <code>args</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.preprocess_logits_for_metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.preprocess_logits_for_metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>preprocess_logits_for_metrics</strong> (<code>Callable[[torch.Tensor, torch.Tensor], torch.Tensor]</code>, <em>optional</em>) &#x2014; A function that preprocess the logits right before caching them at each evaluation step. Must take two tensors, the logits and the labels, and return the logits once processed as desired. 
The modifications made by this function will be reflected in the predictions received by <code>compute_metrics</code>.</p> <p>Note that the labels (second parameter) will be <code>None</code> if the dataset does not have them.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.</p> <p>Important attributes:</p> <ul><li><strong>model</strong> — Always points to the core model. If using a transformers model, it will be a <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a> subclass.</li> <li><strong>model_wrapped</strong> — Always points to the most external model in case one or more other modules wrap the original model. This is the model that should be used for the forward pass. For example, under <code>DeepSpeed</code>, the inner model is wrapped in <code>DeepSpeed</code> and then again in <code>torch.nn.DistributedDataParallel</code>. If the inner model hasn’t been wrapped, then <code>self.model_wrapped</code> is the same as <code>self.model</code>.</li> <li><strong>is_model_parallel</strong> — Whether or not a model has been switched to a model parallel mode (different from data parallelism, this means some of the model layers are split on different GPUs).</li> <li><strong>place_model_on_device</strong> — Whether or not to automatically place the model on the device - it will be set to <code>False</code> if model parallel or deepspeed is used, or if the default <code>TrainingArguments.place_model_on_device</code> is overridden to return <code>False</code> .</li> <li><strong>is_in_train</strong> — Whether or not a model is currently running <code>train</code> (e.g. 
when <code>evaluate</code> is called while in <code>train</code>)</li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.add_callback"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>add_callback</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.add_callback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.add_callback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L513" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">callback<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.add_callback.callback" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.add_callback.callback"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>callback</strong> (<code>type</code> or <code>TrainerCallback</code>) &#x2014; A <code>TrainerCallback</code> class or an instance of a <code>TrainerCallback</code>. In the first case, will instantiate a member of that class.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Add a callback to the current list of <code>TrainerCallback</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.autocast_smart_context_manager"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 
11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>autocast_smart_context_manager</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.autocast_smart_context_manager" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.autocast_smart_context_manager"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L1956" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>A helper wrapper that creates an appropriate context manager 
for <code>autocast</code> while feeding it the desired arguments, depending on the situation.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.compute_loss"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>compute_loss</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.compute_loss" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.compute_loss"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2020" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_outputs<span class="opacity-60"> = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>How the loss is computed by Trainer. 
By default, all models return the loss in the first element.</p> <p>Subclass and override for custom behavior.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.create_optimizer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_optimizer</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.create_optimizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.create_optimizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L836" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Setup the optimizer.</p> <p>We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the Trainer’s init through <code>optimizers</code>, or subclass and override this method in a subclass.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.create_optimizer_and_scheduler"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_optimizer_and_scheduler</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.create_optimizer_and_scheduler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.create_optimizer_and_scheduler"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L825" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Setup the optimizer and the learning rate scheduler.</p> <p>We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the Trainer’s init through <code>optimizers</code>, or subclass and override this method (or <code>create_optimizer</code> and/or <code>create_scheduler</code>) in a subclass.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.create_scheduler"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_scheduler</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.create_scheduler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.create_scheduler"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L921" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60">: Optimizer = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.create_scheduler.num_training_steps" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.create_scheduler.num_training_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_training_steps</strong> (int) &#x2014; The number of training steps to do.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Setup the scheduler. 
The optimizer of the trainer must have been set up either before this method is called or passed as an argument.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.evaluate"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>evaluate</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.evaluate" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.evaluate"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2234" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_dataset<span class="opacity-60">: typing.Optional[torch.utils.data.dataset.Dataset] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_key_prefix<span class="opacity-60">: str = &#39;eval&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex 
space-x-1.5 items-start"><a id="transformers.Trainer.evaluate.eval_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.evaluate.eval_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_dataset</strong> (<code>Dataset</code>, <em>optional</em>) &#x2014; Pass a dataset if you wish to override <code>self.eval_dataset</code>. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. 
It must implement the <code>__len__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.evaluate.ignore_keys" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.evaluate.ignore_keys"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_keys</strong> (<code>Lst[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.evaluate.metric_key_prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.evaluate.metric_key_prefix"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;eval&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;eval_bleu&#x201D; if the prefix is &#x201C;eval&#x201D; (default)<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Run evaluation and returns metrics.</p> <p>The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init <code>compute_metrics</code> argument).</p> <p>You can also subclass and override this method to inject custom behavior.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.evaluation_loop"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 
17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>evaluation_loop</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.evaluation_loop" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.evaluation_loop"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2362" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader<span class="opacity-60">: DataLoader</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">description<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_loss_only<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_key_prefix<span class="opacity-60">: str = &#39;eval&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prediction/evaluation loop, shared by <code>Trainer.evaluate()</code> and <code>Trainer.predict()</code>.</p> <p>Works both with or without labels.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.floating_point_ops"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 
dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>floating_point_ops</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.floating_point_ops" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.floating_point_ops"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 
56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2683" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.floating_point_ops.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.floating_point_ops.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>Dict[str, Union[torch.Tensor, Any]]</code>) &#x2014; The inputs and targets of the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Trainer.floating_point_ops.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The number of floating-point operations.</p> <!-- HTML_TAG_END --></p></div></div> <p>For models that inherit from <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel">PreTrainedModel</a>, uses that method to compute the number of floating point operations for every backward + forward pass. 
If using another model, either implement such a method in the model or subclass and override this method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.get_eval_dataloader"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_eval_dataloader</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.get_eval_dataloader" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.get_eval_dataloader"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L735" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_dataset<span class="opacity-60">: typing.Optional[torch.utils.data.dataset.Dataset] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.get_eval_dataloader.eval_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.get_eval_dataloader.eval_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_dataset</strong> (<code>torch.utils.data.Dataset</code>, <em>optional</em>) &#x2014; If provided, will override <code>self.eval_dataset</code>. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. 
It must implement <code>__len__</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Returns the evaluation <code>DataLoader</code>.</p> <p>Subclass and override this method if you want to inject some custom behavior.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.get_optimizer_cls_and_kwargs"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_optimizer_cls_and_kwargs</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.get_optimizer_cls_and_kwargs" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.Trainer.get_optimizer_cls_and_kwargs"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L873" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.get_optimizer_cls_and_kwargs.args" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Trainer.get_optimizer_cls_and_kwargs.args"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>args</strong> (<code>transformers.training_args.TrainingArguments</code>) &#x2014; The training arguments for the training session.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Returns the optimizer class and optimizer parameters based on the training arguments.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.get_test_dataloader"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" 
fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_test_dataloader</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.get_test_dataloader" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.get_test_dataloader"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L782" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono 
text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">test_dataset<span class="opacity-60">: Dataset</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.get_test_dataloader.test_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.get_test_dataloader.test_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>test_dataset</strong> (<code>torch.utils.data.Dataset</code>, <em>optional</em>) &#x2014; The test dataset to use. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. 
It must implement <code>__len__</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Returns the test <code>DataLoader</code>.</p> <p>Subclass and override this method if you want to inject some custom behavior.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.get_train_dataloader"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_train_dataloader</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.get_train_dataloader" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.get_train_dataloader"><svg 
class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L660" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Returns the training <code>DataLoader</code>.</p> <p>Will use no sampler if <code>self.train_dataset</code> does not implement <code>__len__</code>, a random sampler (adapted to distributed training if necessary) otherwise.</p> <p>Subclass and override this method if you want to inject some custom behavior.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.hyperparameter_search"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" 
class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>hyperparameter_search</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.hyperparameter_search" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.hyperparameter_search"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 
0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L1812" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hp_space<span class="opacity-60">: typing.Union[typing.Callable[[ForwardRef(&#39;optuna.Trial&#39;)], typing.Dict[str, float]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">compute_objective<span class="opacity-60">: typing.Union[typing.Callable[[typing.Dict[str, float]], float], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">n_trials<span class="opacity-60">: int = 20</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">direction<span class="opacity-60">: str = &#39;minimize&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">backend<span class="opacity-60">: typing.Union[ForwardRef(&#39;str&#39;), transformers.trainer_utils.HPSearchBackend, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hp_name<span class="opacity-60">: typing.Union[typing.Callable[[ForwardRef(&#39;optuna.Trial&#39;)], str], NoneType] = None</span></span> </span><span 
class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.hyperparameter_search.hp_space" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.hyperparameter_search.hp_space"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hp_space</strong> (<code>Callable[[&quot;optuna.Trial&quot;], Dict[str, float]]</code>, <em>optional</em>) &#x2014; A function that defines the hyperparameter search space. 
Will default to <code>default_hp_space_optuna()</code>or <code>default_hp_space_ray()</code>or <code>default_hp_space_sigopt()</code>depending on your backend.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.hyperparameter_search.compute_objective" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.hyperparameter_search.compute_objective"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>compute_objective</strong> (<code>Callable[[Dict[str, float]], float]</code>, <em>optional</em>) &#x2014; A function computing the objective to minimize or maximize from the metrics returned by the <code>evaluate</code> method. 
Will default to <code>default_compute_objective()</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.hyperparameter_search.n_trials" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.hyperparameter_search.n_trials"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>n_trials</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014; The number of trial runs to test.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.hyperparameter_search.direction(str," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.hyperparameter_search.direction(str,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>direction(<code>str</code>,</strong> <em>optional</em>, defaults to <code>&quot;minimize&quot;</code>) &#x2014; Whether to optimize greater or lower objects. Can be <code>&quot;minimize&quot;</code> or <code>&quot;maximize&quot;</code>, you should pick <code>&quot;minimize&quot;</code> when optimizing the validation loss, <code>&quot;maximize&quot;</code> when optimizing one or several metrics.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.hyperparameter_search.backend(str" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.hyperparameter_search.backend(str"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>backend(<code>str</code></strong> or <code>HPSearchBackend</code>, <em>optional</em>) &#x2014; The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending on which one is installed. If all are installed, will default to optuna. kwargs &#x2014; Additional keyword arguments passed along to <code>optuna.create_study</code> or <code>ray.tune.run</code>. For more information see:</p> <ul> <li>the documentation of <a href="https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html" rel="nofollow">optuna.create_study</a></li> <li>the documentation of <a href="https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run" rel="nofollow">tune.run</a></li> <li>the documentation of <a href="https://app.sigopt.com/docs/endpoints/experiments/create" rel="nofollow">sigopt</a></li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Launch an hyperparameter search using <code>optuna</code> or <code>Ray Tune</code> or <code>SigOpt</code>. 
The optimized quantity is determined by <code>compute_objective</code>, which defaults to a function returning the evaluation loss when no metric is provided, the sum of all metrics otherwise.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>To use this method, you need to have provided a <code>model_init</code> when initializing your <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>: we need to reinitialize the model at each new run. This is incompatible with the <code>optimizers</code> argument, so you need to subclass <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> and override the method <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.create_optimizer_and_scheduler">create_optimizer_and_scheduler()</a> for custom optimizer/scheduler.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.init_git_repo"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" 
d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>init_git_repo</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.init_git_repo" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.init_git_repo"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2701" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">at_init<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.init_git_repo.at_init" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.init_git_repo.at_init"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>at_init</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether this function is called before any training or not. 
If <code>self.args.overwrite_output_dir</code> is <code>True</code> and <code>at_init</code> is <code>True</code>, the path to the repo (which is <code>self.args.output_dir</code>) might be wiped out.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Initializes a git repo in <code>self.args.hub_model_id</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.is_local_process_zero"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>is_local_process_zero</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.is_local_process_zero" class="header-link 
invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.is_local_process_zero"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2044" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.is_world_process_zero"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>is_world_process_zero</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.is_world_process_zero" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.is_world_process_zero"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2051" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be <code>True</code> for one process).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.log"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 
7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>log</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.log" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.log"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L1905" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logs<span class="opacity-60">: typing.Dict[str, float]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.log.logs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.log.logs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logs</strong> (<code>Dict[str, float]</code>) &#x2014; The values to log.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Log <code>logs</code> on the various objects watching training.</p> <p>Subclass and override this method to inject custom behavior.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.trainer_pt_utils.log_metrics"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 
16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>log_metrics</span></h4><!-- HTML_TAG_END --> <a id="transformers.trainer_pt_utils.log_metrics" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.trainer_pt_utils.log_metrics"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L855" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">split<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metrics<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.log_metrics.split" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.log_metrics.split"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>split</strong> (<code>str</code>) &#x2014; Mode/split name: one of <code>train</code>, <code>eval</code>, <code>test</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.log_metrics.metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.log_metrics.metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predictmetrics: metrics dict<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Log metrics in a specially formatted way</p> <p>Under distributed environment this is done only for a process with rank 0.</p> <p>Notes on memory reports:</p> <p>In order to get memory usage report you need to install <code>psutil</code>. 
You can do that with <code>pip install psutil</code>.</p> <p>Now when this method is run, you will see a report that will include: :</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-attr">init_mem_cpu_alloc_delta</span> = <span class="hljs-number">1301</span>MB <span class="hljs-attr">init_mem_cpu_peaked_delta</span> = <span class="hljs-number">154</span>MB <span class="hljs-attr">init_mem_gpu_alloc_delta</span> = <span class="hljs-number">230</span>MB <span class="hljs-attr">init_mem_gpu_peaked_delta</span> = <span class="hljs-number">0</span>MB <span class="hljs-attr">train_mem_cpu_alloc_delta</span> = <span class="hljs-number">1345</span>MB <span class="hljs-attr">train_mem_cpu_peaked_delta</span> = <span class="hljs-number">0</span>MB <span class="hljs-attr">train_mem_gpu_alloc_delta</span> = <span 
class="hljs-number">693</span>MB <span class="hljs-attr">train_mem_gpu_peaked_delta</span> = <span class="hljs-number">7</span>MB<!-- HTML_TAG_END --></pre></div> <p><strong>Understanding the reports:</strong></p> <ul><li>the first segment, e.g., <code>train__</code>, tells you which stage the metrics are for. Reports starting with <code>init_</code> will be added to the first stage that gets run. So that if only evaluation is run, the memory usage for the <code>__init__</code> will be reported along with the <code>eval_</code> metrics.</li> <li>the third segment, is either <code>cpu</code> or <code>gpu</code>, tells you whether it’s the general RAM or the gpu0 memory metric.</li> <li><code>*_alloc_delta</code> - is the difference in the used/allocated memory counter between the end and the start of the stage - it can be negative if a function released more memory than it allocated.</li> <li><code>*_peaked_delta</code> - is any extra memory that was consumed and then freed - relative to the current allocated memory counter - it is never negative. When you look at the metrics of any stage you add up <code>alloc_delta</code> + <code>peaked_delta</code> and you know how much memory was needed to complete that stage.</li></ul> <p>The reporting happens only for process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since the main process does the bulk of work, but it could be not quite so if model parallel is used and then other GPUs may use a different amount of gpu memory. This is also not the same under DataParallel where gpu0 may require much more memory than the rest since it stores the gradient and optimizer states for all participating GPUS. Perhaps in the future these reports will evolve to measure those too.</p> <p>The CPU RAM metric measures RSS (Resident Set Size) includes both the memory which is unique to the process and the memory shared with other processes. 
It is important to note that it does not include swapped out memory, so the reports could be imprecise.</p> <p>The CPU peak memory is measured using a sampling thread. Due to python’s GIL it may miss some of the peak memory if that thread didn’t get a chance to run when the highest memory was used. Therefore this report can be less than reality. Using <code>tracemalloc</code> would have reported the exact peak memory, but it doesn’t report memory allocations outside of python. So if some C++ CUDA extension allocated its own memory it won’t be reported. And therefore it was dropped in favor of the memory sampling approach, which reads the current process memory usage.</p> <p>The GPU allocated and peak memory reporting is done with <code>torch.cuda.memory_allocated()</code> and <code>torch.cuda.max_memory_allocated()</code>. This metric reports only “deltas” for pytorch-specific allocations, as <code>torch.cuda</code> memory management system doesn’t track any memory allocated outside of pytorch. For example, the very first cuda call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory.</p> <p>Note that this tracker doesn’t account for memory allocations outside of <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>’s <code>__init__</code>, <code>train</code>, <code>evaluate</code> and <code>predict</code> calls.</p> <p>Because <code>evaluation</code> calls may happen during <code>train</code>, we can’t handle nested invocations because <code>torch.cuda.max_memory_allocated</code> is a single counter, so if it gets reset by a nested eval call, <code>train</code>’s tracker will report incorrect info. If this <a href="https://github.com/pytorch/pytorch/issues/16266" rel="nofollow">pytorch issue</a> gets resolved it will be possible to change this class to be re-entrant. Until then we will only track the outer level of <code>train</code>, <code>evaluate</code> and <code>predict</code> methods. 
Which means that if <code>eval</code> is called during <code>train</code>, it’s the latter that will account for its memory usage and that of the former.</p> <p>This also means that if any other tool that is used along the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> calls <code>torch.cuda.reset_peak_memory_stats</code>, the gpu peak memory stats could be invalid. And the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will disrupt the normal behavior of any such tools that rely on calling <code>torch.cuda.reset_peak_memory_stats</code> themselves.</p> <p>For best performance you may want to consider turning the memory profiling off for production runs.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.trainer_pt_utils.metrics_format"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 
7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>metrics_format</span></h4><!-- HTML_TAG_END --> <a id="transformers.trainer_pt_utils.metrics_format" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.trainer_pt_utils.metrics_format"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L829" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metrics<span class="opacity-60">: typing.Dict[str, float]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- 
HTML_TAG_START --><span>metrics (<code>Dict[str, float]</code>)</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.metrics_format.metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.metrics_format.metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predict<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.trainer_pt_utils.metrics_format.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>metrics (<code>Dict[str, float]</code>)</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 
border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The reformatted metrics</p> <!-- HTML_TAG_END --></p></div></div> <p>Reformat Trainer metrics values to a human-readable format</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.num_examples"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>num_examples</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.num_examples" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.num_examples"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L938" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader<span class="opacity-60">: DataLoader</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Helper to get number of samples in a <code>DataLoader</code> by accessing its dataset.</p> <p>Will raise an exception if the underlying dataset does not implement method <code>__len__</code></p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.pop_callback"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 
font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>pop_callback</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.pop_callback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.pop_callback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L524" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">callback<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>TrainerCallback</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.pop_callback.callback" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.pop_callback.callback"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>callback</strong> (<code>type</code> or <code>TrainerCallback</code>) &#x2014; A <code>TrainerCallback</code> class or an instance of a <code>TrainerCallback</code>. In the first case, will pop the first member of that class found in the list of callbacks.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Trainer.pop_callback.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>TrainerCallback</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The callback removed, if found.</p> <!-- HTML_TAG_END --></p></div></div> <p>Remove a callback from the current list of <code>TrainerCallback</code> and returns it.</p> <p>If the callback is not found, returns <code>None</code> (and no error is raised).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.predict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 
27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>predict</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.predict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.predict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2303" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">test_dataset<span class="opacity-60">: Dataset</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_key_prefix<span class="opacity-60">: str = &#39;test&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.predict.test_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.predict.test_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 
56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>test_dataset</strong> (<code>Dataset</code>) &#x2014; Dataset to run the predictions on. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. Has to implement the method <code>__len__</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.predict.ignore_keys" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.predict.ignore_keys"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_keys</strong> (<code>Lst[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.Trainer.predict.metric_key_prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.predict.metric_key_prefix"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;test&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;test_bleu&#x201D; if the prefix is &#x201C;test&#x201D; (default)<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Run prediction and returns predictions and potential metrics.</p> <p>Depending on the dataset and your use case, your test dataset may contain labels. 
In that case, this method will also return metrics, like in <code>evaluate()</code>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If your predictions or labels have different sequence length (for instance because you’re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100.</p></div> <p>Returns: <em>NamedTuple</em> A namedtuple with the following keys:</p> <ul><li>predictions (<code>np.ndarray</code>): The predictions on <code>test_dataset</code>.</li> <li>label_ids (<code>np.ndarray</code>, <em>optional</em>): The labels (if the dataset contained some).</li> <li>metrics (<code>Dict[str, float]</code>, <em>optional</em>): The potential dictionary of metrics (if the dataset contained labels).</li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.prediction_loop"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 
12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prediction_loop</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.prediction_loop" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.prediction_loop"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2878" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">dataloader<span class="opacity-60">: DataLoader</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">description<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_loss_only<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_key_prefix<span class="opacity-60">: str = &#39;eval&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Prediction/evaluation loop, shared by <code>Trainer.evaluate()</code> and <code>Trainer.predict()</code>.</p> <p>Works both with or without labels.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.prediction_step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 
27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prediction_step</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.prediction_step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.prediction_step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2586" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: Module</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_loss_only<span class="opacity-60">: bool</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.prediction_step.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.prediction_step.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to evaluate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.prediction_step.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.prediction_step.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>Dict[str, Union[torch.Tensor, 
Any]]</code>) &#x2014; The inputs and targets of the model.</p> <p>The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument <code>labels</code>. Check your model&#x2019;s documentation for all accepted arguments.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.prediction_step.prediction_loss_only" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.prediction_step.prediction_loss_only"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prediction_loss_only</strong> (<code>bool</code>) &#x2014; Whether or not to return the loss only.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.prediction_step.ignore_keys" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Trainer.prediction_step.ignore_keys"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_keys</strong> (<code>Lst[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Trainer.prediction_step.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A tuple with the loss, logits and labels (each being optional).</p> <!-- HTML_TAG_END --></p></div></div> <p>Perform an evaluation step on <code>model</code> using <code>inputs</code>.</p> <p>Subclass and override to inject custom behavior.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.push_to_hub"><!-- 
HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2824" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = &#39;End of training&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">blocking<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;End of training&quot;</code>) &#x2014; Message to commit while pushing.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.push_to_hub.blocking" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.push_to_hub.blocking"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>blocking</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether the function should return only when the <code>git push</code> has finished. kwargs &#x2014; Additional keyword arguments passed along to <code>create_model_card()</code><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Upload <em>self.model</em> and <em>self.tokenizer</em> to the 🤗 model hub on the repo <em>self.args.hub_model_id</em>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.remove_callback"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 
12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>remove_callback</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.remove_callback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.remove_callback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L540" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">callback<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.Trainer.remove_callback.callback" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.remove_callback.callback"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>callback</strong> (<code>type</code> or <code>TrainerCallback</code>) &#x2014; A <code>TrainerCallback</code> class or an instance of a <code>TrainerCallback</code>. 
In the first case, will remove the first member of that class found in the list of callbacks.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Remove a callback from the current list of <code>TrainerCallback</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.trainer_pt_utils.save_metrics"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_metrics</span></h4><!-- HTML_TAG_END --> <a id="transformers.trainer_pt_utils.save_metrics" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.trainer_pt_utils.save_metrics"><svg 
class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L945" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">split<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metrics<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">combined<span class="opacity-60"> = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.save_metrics.split" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.save_metrics.split"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>split</strong> (<code>str</code>) &#x2014; Mode/split name: one of <code>train</code>, <code>eval</code>, <code>test</code>, <code>all</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.save_metrics.metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.save_metrics.metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metrics</strong> (<code>Dict[str, float]</code>) &#x2014; The metrics returned from train/evaluate/predict<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.save_metrics.combined" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.save_metrics.combined"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>combined</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Creates combined metrics by updating 
<code>all_results.json</code> with metrics of this call<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save metrics into a json file for that split, e.g. <code>train_results.json</code>.</p> <p>Under distributed environment this is done only for a process with rank 0.</p> <p>To understand the metrics please read the docstring of <code>log_metrics()</code> The only difference is that raw unformatted numbers are saved in the current method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.save_model"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>save_model</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.save_model" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.save_model"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L2063" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_dir<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">_internal_call<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Will save the model, so you can reload it using <code>from_pretrained()</code>.</p> <p>Will only save from the main process.</p></div> 
<div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.trainer_pt_utils.save_state"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_state</span></h4><!-- HTML_TAG_END --> <a id="transformers.trainer_pt_utils.save_state" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.trainer_pt_utils.save_state"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L983" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Saves the Trainer state, since Trainer.save_model saves only the tokenizer with the model</p> <p>Under distributed environment this is done only for a process with rank 0.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.train"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" 
fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>train</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.train" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.train"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L1102" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resume_from_checkpoint<span class="opacity-60">: typing.Union[str, bool, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trial<span class="opacity-60">: typing.Union[ForwardRef(&#39;optuna.Trial&#39;), typing.Dict[str, typing.Any]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys_for_eval<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.train.resume_from_checkpoint" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.train.resume_from_checkpoint"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_from_checkpoint</strong> (<code>str</code> or <code>bool</code>, <em>optional</em>) &#x2014; If a <code>str</code>, local path to a saved checkpoint as saved by a previous instance of <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>. If a <code>bool</code> and equals <code>True</code>, load the last checkpoint in <em>args.output_dir</em> as saved by a previous instance of <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>. If present, training will resume from the model/optimizer/scheduler states loaded here.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.train.trial" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.train.trial"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 
1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>trial</strong> (<code>optuna.Trial</code> or <code>Dict[str, Any]</code>, <em>optional</em>) &#x2014; The trial run or the hyperparameter dictionary for hyperparameter search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.train.ignore_keys_for_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.train.ignore_keys_for_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_keys_for_eval</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions for evaluation during the training. 
kwargs &#x2014; Additional keyword arguments used to hide deprecated arguments<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Main training entry point.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Trainer.training_step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>training_step</span></h4><!-- HTML_TAG_END --> <a id="transformers.Trainer.training_step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Trainer.training_step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer.py#L1971" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: Module</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: typing.Dict[str, typing.Union[torch.Tensor, typing.Any]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.Tensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.Trainer.training_step.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.training_step.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to train.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Trainer.training_step.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Trainer.training_step.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>Dict[str, Union[torch.Tensor, Any]]</code>) &#x2014; The inputs and targets of the model.</p> <p>The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument <code>labels</code>. Check your model&#x2019;s documentation for all accepted arguments.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Trainer.training_step.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.Tensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The tensor with training loss on this batch.</p> <!-- HTML_TAG_END --></p></div></div> <p>Perform a training step on a batch of inputs.</p> <p>Subclass and override to inject custom behavior.</p></div></div> <h2 class="relative group"><a id="transformers.Seq2SeqTrainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Seq2SeqTrainer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Seq2SeqTrainer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Seq2SeqTrainer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Seq2SeqTrainer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Seq2SeqTrainer"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_seq2seq.py#L30" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Union[transformers.modeling_utils.PreTrainedModel, torch.nn.modules.module.Module] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: TrainingArguments = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_collator<span class="opacity-60">: typing.Optional[DataCollator] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">train_dataset<span class="opacity-60">: 
typing.Optional[torch.utils.data.dataset.Dataset] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_dataset<span class="opacity-60">: typing.Optional[torch.utils.data.dataset.Dataset] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: typing.Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_init<span class="opacity-60">: typing.Callable[[], transformers.modeling_utils.PreTrainedModel] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">compute_metrics<span class="opacity-60">: typing.Union[typing.Callable[[transformers.trainer_utils.EvalPrediction], typing.Dict], NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">callbacks<span class="opacity-60">: typing.Optional[typing.List[transformers.trainer_callback.TrainerCallback]] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizers<span class="opacity-60">: typing.Tuple[torch.optim.optimizer.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None)</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">preprocess_logits_for_metrics<span class="opacity-60">: typing.Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None</span></span> </span> 
<span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Seq2SeqTrainer.evaluate"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>evaluate</span></h4><!-- HTML_TAG_END --> <a id="transformers.Seq2SeqTrainer.evaluate" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Seq2SeqTrainer.evaluate"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_seq2seq.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_dataset<span class="opacity-60">: typing.Optional[torch.utils.data.dataset.Dataset] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_key_prefix<span class="opacity-60">: str = &#39;eval&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">num_beams<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.evaluate.eval_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.evaluate.eval_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_dataset</strong> (<code>Dataset</code>, <em>optional</em>) &#x2014; Pass a dataset if you wish to override <code>self.eval_dataset</code>. If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. 
It must implement the <code>__len__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.evaluate.ignore_keys" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.evaluate.ignore_keys"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_keys</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.evaluate.metric_key_prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.evaluate.metric_key_prefix"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;eval&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;eval_bleu&#x201D; if the prefix is <code>&quot;eval&quot;</code> (default)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.evaluate.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.evaluate.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum target length to use when predicting with the generate method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.evaluate.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.evaluate.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of beams for beam search that will be used when predicting with the generate method. 
1 means no beam search.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Run evaluation and returns metrics.</p> <p>The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init <code>compute_metrics</code> argument).</p> <p>You can also subclass and override this method to inject custom behavior.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Seq2SeqTrainer.predict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>predict</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.Seq2SeqTrainer.predict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Seq2SeqTrainer.predict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_seq2seq.py#L72" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">test_dataset<span class="opacity-60">: Dataset</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_keys<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_key_prefix<span class="opacity-60">: str = &#39;test&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.predict.test_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.predict.test_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>test_dataset</strong> (<code>Dataset</code>) &#x2014; Dataset to run the predictions on. 
If it is an <code>datasets.Dataset</code>, columns not accepted by the <code>model.forward()</code> method are automatically removed. Has to implement the method <code>__len__</code><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.predict.ignore_keys" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.predict.ignore_keys"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_keys</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.predict.metric_key_prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Seq2SeqTrainer.predict.metric_key_prefix"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_key_prefix</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;eval&quot;</code>) &#x2014; An optional prefix to be used as the metrics key prefix. 
For example the metrics &#x201C;bleu&#x201D; will be named &#x201C;eval_bleu&#x201D; if the prefix is <code>&quot;eval&quot;</code> (default)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.predict.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.predict.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum target length to use when predicting with the generate method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainer.predict.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainer.predict.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of beams for beam search that will be used when predicting with the generate method. 1 means no beam search.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Run prediction and returns predictions and potential metrics.</p> <p>Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in <code>evaluate()</code>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If your predictions or labels have different sequence lengths (for instance because you’re doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. 
The padding index is -100.</p></div> <p>Returns: <em>NamedTuple</em> A namedtuple with the following keys:</p> <ul><li>predictions (<code>np.ndarray</code>): The predictions on <code>test_dataset</code>.</li> <li>label_ids (<code>np.ndarray</code>, <em>optional</em>): The labels (if the dataset contained some).</li> <li>metrics (<code>Dict[str, float]</code>, <em>optional</em>): The potential dictionary of metrics (if the dataset contained labels).</li></ul></div></div> <h2 class="relative group"><a id="transformers.TrainingArguments" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TrainingArguments </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 
inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TrainingArguments</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L86" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_dir<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite_output_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_predict<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">evaluation_strategy<span class="opacity-60">: IntervalStrategy = &#39;no&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_loss_only<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_device_train_batch_size<span class="opacity-60">: int = 8</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_device_eval_batch_size<span class="opacity-60">: int = 8</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_gpu_train_batch_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_gpu_eval_batch_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">gradient_accumulation_steps<span class="opacity-60">: int = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_accumulation_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">learning_rate<span class="opacity-60">: float = 5e-05</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_beta1<span class="opacity-60">: float = 0.9</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_beta2<span class="opacity-60">: float = 0.999</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">adam_epsilon<span class="opacity-60">: float = 1e-08</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_grad_norm<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_train_epochs<span class="opacity-60">: float = 3.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_steps<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr_scheduler_type<span class="opacity-60">: SchedulerType = &#39;linear&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_ratio<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_steps<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_level<span class="opacity-60">: typing.Optional[str] = &#39;passive&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_level_replica<span class="opacity-60">: typing.Optional[str] = &#39;passive&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_on_each_node<span class="opacity-60">: 
bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_dir<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_strategy<span class="opacity-60">: IntervalStrategy = &#39;steps&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_first_step<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_steps<span class="opacity-60">: int = 500</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_nan_inf_filter<span class="opacity-60">: str = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_strategy<span class="opacity-60">: IntervalStrategy = &#39;steps&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_steps<span class="opacity-60">: int = 500</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_total_limit<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_on_each_node<span class="opacity-60">: bool = False</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_cuda<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 42</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_seed<span class="opacity-60">: int = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bf16<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_opt_level<span class="opacity-60">: str = &#39;O1&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">half_precision_backend<span class="opacity-60">: str = &#39;auto&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bf16_full_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_full_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tf32<span class="opacity-60">: bool = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">local_rank<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">xpu_backend<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tpu_num_cores<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tpu_metrics_debug<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">debug<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_drop_last<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_steps<span class="opacity-60">: int = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_num_workers<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_index<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">run_name<span 
class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">disable_tqdm<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">remove_unused_columns<span class="opacity-60">: typing.Optional[bool] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_names<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">load_best_model_at_end<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_for_best_model<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">greater_is_better<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_data_skip<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sharded_ddp<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">deepspeed<span 
class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_smoothing_factor<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optim<span class="opacity-60">: OptimizerNames = &#39;adamw_hf&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adafactor<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">group_by_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_column_name<span class="opacity-60">: typing.Optional[str] = &#39;length&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">report_to<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ddp_find_unused_parameters<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ddp_bucket_cap_mb<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_pin_memory<span 
class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_memory_metrics<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_legacy_prediction_loop<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resume_from_checkpoint<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_model_id<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_strategy<span class="opacity-60">: HubStrategy = &#39;every_save&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_token<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">gradient_checkpointing<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_backend<span class="opacity-60">: str = &#39;auto&#39;</span></span> </span><span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_model_id<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_organization<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_token<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mp_parameters<span class="opacity-60">: str = &#39;&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.output_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.output_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.overwrite_output_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.overwrite_output_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite_output_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, overwrite the content of the output directory. 
Use this to continue training if <code>output_dir</code> points to a checkpoint directory.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.do_train" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.do_train"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_train</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run training or not. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. 
See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.do_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.do_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_eval</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to run evaluation on the validation set or not. Will be set to <code>True</code> if <code>evaluation_strategy</code> is different from <code>&quot;no&quot;</code>. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. 
See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.do_predict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.do_predict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_predict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run predictions on the test set or not. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. 
See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.evaluation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.evaluation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>evaluation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;no&quot;</code>) &#x2014; The evaluation strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No evaluation is done during training.</li> <li><code>&quot;steps&quot;</code>: Evaluation is done (and logged) every <code>eval_steps</code>.</li> <li><code>&quot;epoch&quot;</code>: Evaluation is done at the end of each epoch.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.prediction_loss_only" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.prediction_loss_only"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prediction_loss_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When performing evaluation and generating predictions, only returns the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.per_device_train_batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.per_device_train_batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>per_device_train_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.per_device_eval_batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.per_device_eval_batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>per_device_eval_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for evaluation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.gradient_accumulation_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.gradient_accumulation_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>gradient_accumulation_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of updates steps to accumulate the gradients for, before performing a backward/update pass.</p> <div 
class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every <code>gradient_accumulation_steps * xxx_step</code> training examples.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.eval_accumulation_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.eval_accumulation_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_accumulation_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. 
If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but requires more memory).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.learning_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.learning_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>learning_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 5e-5) &#x2014; The initial learning rate for <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.weight_decay"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.adam_beta1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.adam_beta1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 hyperparameter for the <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.adam_beta2" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.adam_beta2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 hyperparameter for the <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END 
--> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.adam_epsilon" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.adam_epsilon"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon hyperparameter for the <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.max_grad_norm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.max_grad_norm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_grad_norm</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Maximum gradient norm (for gradient clipping).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.num_train_epochs(float," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.num_train_epochs(float,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>num_train_epochs(<code>float</code>,</strong> <em>optional</em>, defaults to 3.0) &#x2014; Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.max_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.max_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; If set to a positive number, the total number of training steps to perform. Overrides <code>num_train_epochs</code>. 
In case of using a finite iterable dataset the training may stop before reaching the set number of steps when all data is exhausted<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.lr_scheduler_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.lr_scheduler_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lr_scheduler_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a>, <em>optional</em>, defaults to <code>&quot;linear&quot;</code>) &#x2014; The scheduler type to use. 
See the documentation of <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a> for all possible values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.warmup_ratio" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.warmup_ratio"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Ratio of total training steps used for a linear warmup from 0 to <code>learning_rate</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.warmup_steps"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of steps used for a linear warmup from 0 to <code>learning_rate</code>. Overrides any effect of <code>warmup_ratio</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.log_level" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.log_level"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_level</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on the main process. Possible choices are the log levels as strings: &#x2018;debug&#x2019;, &#x2018;info&#x2019;, &#x2018;warning&#x2019;, &#x2018;error&#x2019; and &#x2018;critical&#x2019;, plus a &#x2018;passive&#x2019; level which doesn&#x2019;t set anything and lets the application set the level.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.log_level_replica" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.log_level_replica"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_level_replica</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on 
replicas. Same choices as <code>log_level</code>&#x201D;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.log_on_each_node" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.log_on_each_node"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; In multinode distributed training, whether to log using <code>log_level</code> once per node, or only on the main node.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.logging_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.logging_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_dir</strong> (<code>str</code>, <em>optional</em>) &#x2014; <a href="https://www.tensorflow.org/tensorboard" rel="nofollow">TensorBoard</a> log directory. Will default to *output_dir/runs/<strong>CURRENT_DATETIME_HOSTNAME*</strong>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.logging_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.logging_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The logging strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No logging is done during training.</li> <li><code>&quot;epoch&quot;</code>: Logging is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Logging is done every <code>logging_steps</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.logging_first_step" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.logging_first_step"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>logging_first_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to log and evaluate the first <code>global_step</code> or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.logging_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.logging_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of update steps between two logs if <code>logging_strategy=&quot;steps&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.logging_nan_inf_filter" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TrainingArguments.logging_nan_inf_filter"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_nan_inf_filter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to filter <code>nan</code> and <code>inf</code> losses for logging. 
If set to <code>True</code> the loss of every step that is <code>nan</code> or <code>inf</code> is filtered and the average loss of the current logging window is taken instead.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p><code>logging_nan_inf_filter</code> only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.save_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.save_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to 
<code>&quot;steps&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No save is done during training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.save_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.save_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of updates steps before two checkpoint saves if <code>save_strategy=&quot;steps&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.save_total_limit" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.save_total_limit"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_total_limit</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a value is passed, will limit the total amount of checkpoints. 
Deletes the older checkpoints in <code>output_dir</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.save_on_each_node" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.save_on_each_node"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one.</p> <p>This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.no_cuda" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TrainingArguments.no_cuda"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>no_cuda</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to not use CUDA even when it is available or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.seed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.seed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>seed</strong> (<code>int</code>, <em>optional</em>, defaults to 42) &#x2014; Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the <code>model_init</code> function to instantiate the model if it has some randomly initialized parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.data_seed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.data_seed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data_seed</strong> (<code>int</code>, <em>optional</em>) &#x2014; Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as <code>seed</code>. 
This can be used to ensure reproducibility of data sampling, independent of the model seed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.bf16" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.bf16"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bf16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher NVIDIA architecture. 
This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.fp16" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.fp16"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.fp16_opt_level" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.fp16_opt_level"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_opt_level</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;O1&#x2019;) &#x2014; For <code>fp16</code> training, Apex AMP optimization level selected in [&#x2018;O0&#x2019;, &#x2018;O1&#x2019;, &#x2018;O2&#x2019;, and &#x2018;O3&#x2019;]. See details on the <a href="https://nvidia.github.io/apex/amp" rel="nofollow">Apex documentation</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.fp16_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.fp16_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 
8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; This argument is deprecated. Use <code>half_precision_backend</code> instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.half_precision_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.half_precision_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>half_precision_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; The backend to use for mixed precision training. Must be one of <code>&quot;auto&quot;</code>, <code>&quot;amp&quot;</code> or <code>&quot;apex&quot;</code>. 
<code>&quot;auto&quot;</code> will use AMP or APEX depending on the PyTorch version detected, while the other choices will force the requested backend.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.bf16_full_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.bf16_full_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bf16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full bfloat16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. 
This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.fp16_full_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.fp16_full_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full float16 evaluation instead of 32-bit. 
This will be faster and save memory but can harm metric values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.tf32" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.tf32"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tf32</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to enable tf32 mode, available in Ampere and newer GPU architectures. 
This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.local_rank" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.local_rank"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local_rank</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Rank of the process during distributed training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.xpu_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.xpu_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>xpu_backend</strong> (<code>str</code>, <em>optional</em>) &#x2014; The backend to use for xpu distributed training. Must be one of <code>&quot;mpi&quot;</code> or <code>&quot;ccl&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.tpu_num_cores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.tpu_num_cores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>tpu_num_cores</strong> (<code>int</code>, <em>optional</em>) &#x2014; When training on TPU, the number of TPU cores (automatically passed by launcher script).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.dataloader_drop_last" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.dataloader_drop_last"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_drop_last</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.eval_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TrainingArguments.eval_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of update steps between two evaluations if <code>evaluation_strategy=&quot;steps&quot;</code>. 
Will default to the same value as <code>logging_steps</code> if not set.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.dataloader_num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.dataloader_num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of subprocesses to use for data loading (PyTorch only). 
0 means that the data will be loaded in the main process.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.past_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.past_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_index</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Some models like <a href="../model_doc/transformerxl">TransformerXL</a> or <a href="../model_doc/xlnet">XLNet</a> can make use of the past hidden states for their predictions. 
If this argument is set to a positive int, the <code>Trainer</code> will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument <code>mems</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.run_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.run_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>run_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; A descriptor for the run. 
Typically used for <a href="https://www.wandb.com/" rel="nofollow">wandb</a> and <a href="https://www.mlflow.org/" rel="nofollow">mlflow</a> logging.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.disable_tqdm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.disable_tqdm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>disable_tqdm</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to disable the tqdm progress bars and table of metrics produced by <code>NotebookTrainingTracker</code> in Jupyter Notebooks. 
Will default to <code>True</code> if the logging level is set to warn or lower (default), <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.remove_unused_columns" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.remove_unused_columns"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>remove_unused_columns</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If using <code>datasets.Dataset</code> datasets, whether or not to automatically remove the columns unused by the model forward method.</p> <p>(Note that this behavior is not implemented for <code>TFTrainer</code>yet.)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.label_names" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.label_names"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_names</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; The list of keys in your dictionary of inputs that correspond to the labels.</p> <p>Will eventually default to <code>[&quot;labels&quot;]</code> except if the model used is one of the <code>XxxForQuestionAnswering</code> in which case it will default to <code>[&quot;start_positions&quot;, &quot;end_positions&quot;]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.load_best_model_at_end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.load_best_model_at_end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 
1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>load_best_model_at_end</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to load the best model found during training at the end of training.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When set to <code>True</code>, the parameters <code>save_strategy</code> needs to be the same as <code>eval_strategy</code>, and in the case it is &#x201C;steps&#x201D;, <code>save_steps</code> must be a round multiple of <code>eval_steps</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.metric_for_best_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.metric_for_best_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_for_best_model</strong> (<code>str</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> to specify the metric to use to compare two different models. Must be the name of a metric returned by the evaluation with or without the prefix <code>&quot;eval_&quot;</code>. Will default to <code>&quot;loss&quot;</code> if unspecified and <code>load_best_model_at_end=True</code> (to use the evaluation loss).</p> <p>If you set this value, <code>greater_is_better</code> will default to <code>True</code>. 
Don&#x2019;t forget to set it to <code>False</code> if your metric is better when lower.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.greater_is_better" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.greater_is_better"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>greater_is_better</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> and <code>metric_for_best_model</code> to specify if better models should have a greater metric or not. 
Will default to:</p> <ul> <li><code>True</code> if <code>metric_for_best_model</code> is set to a value that isn&#x2019;t <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> <li><code>False</code> if <code>metric_for_best_model</code> is not set, or set to <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.ignore_data_skip" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.ignore_data_skip"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_data_skip</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. 
If set to <code>True</code>, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.sharded_ddp" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.sharded_ddp"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sharded_ddp</strong> (<code>bool</code>, <code>str</code> or list of <code>ShardedDDPOption</code> <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Sharded DDP training from <a href="https://github.com/facebookresearch/fairscale" rel="nofollow">FairScale</a> (in distributed training only). 
This is an experimental feature.</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;simple&quot;</code>: to use first instance of sharded DDP released by fairscale (<code>ShardedDDP</code>) similar to ZeRO-2.</li> <li><code>&quot;zero_dp_2&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-2 mode (with <code>reshard_after_forward=False</code>).</li> <li><code>&quot;zero_dp_3&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-3 mode (with <code>reshard_after_forward=True</code>).</li> <li><code>&quot;offload&quot;</code>: to add ZeRO-offload (only compatible with <code>&quot;zero_dp_2&quot;</code> and <code>&quot;zero_dp_3&quot;</code>).</li> </ul> <p>If a string is passed, it will be split on space. If a bool is passed, it will be converted to an empty list for <code>False</code> and <code>[&quot;simple&quot;]</code> for <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.deepspeed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.deepspeed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>deepspeed</strong> (<code>str</code> or <code>dict</code>, <em>optional</em>) &#x2014; Use <a href="https://github.com/microsoft/deepspeed" rel="nofollow">Deepspeed</a>. This is an experimental feature and its API may evolve in the future. The value is either the location of DeepSpeed json config file (e.g., <code>ds_config.json</code>) or an already loaded json file as a <code>dict</code>&#x201D;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.label_smoothing_factor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.label_smoothing_factor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_smoothing_factor</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The label smoothing factor to use. 
Zero means no label smoothing, otherwise the underlying onehot-encoded labels are changed from 0s and 1s to <code>label_smoothing_factor/num_labels</code> and <code>1 - label_smoothing_factor + label_smoothing_factor/num_labels</code> respectively.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.debug" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.debug"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>debug</strong> (<code>str</code> or list of <code>DebugOption</code> <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Enable one or more debug features. 
This is an experimental feature.</p> <p>Possible options are:</p> <ul> <li><code>&quot;underflow_overflow&quot;</code>: detects overflow in model&#x2019;s input/outputs and reports the last frames that led to the event</li> <li><code>&quot;tpu_metrics_debug&quot;</code>: print debug metrics on TPU</li> </ul> <p>The options should be separated by whitespaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.optim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.optim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optim</strong> (<code>str</code> or <code>training_args.OptimizerNames</code> <em>optional</em>, defaults to <code>&quot;adamw_hf&quot;</code>) &#x2014; The optimizer to use: adamw_hf, adamw_torch, adamw_apex_fused, or adafactor.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.adafactor" class="header-link block pr-0.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.adafactor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adafactor</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; This argument is deprecated. 
Use <code>--optim adafactor</code> instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.group_by_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.group_by_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>group_by_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to group together samples of roughly the same length in the training dataset (to minimize padding applied and be more efficient). 
Only useful if applying dynamic padding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.length_column_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.length_column_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>length_column_name</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;length&quot;</code>) &#x2014; Column name for precomputed lengths. If the column exists, grouping by length will use these values rather than computing them on train startup. 
Ignored unless <code>group_by_length</code> is <code>True</code> and the dataset is an instance of <code>Dataset</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.report_to" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.report_to"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>report_to</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>, defaults to <code>&quot;all&quot;</code>) &#x2014; The list of integrations to report the results and logs to. Supported platforms are <code>&quot;azure_ml&quot;</code>, <code>&quot;comet_ml&quot;</code>, <code>&quot;mlflow&quot;</code>, <code>&quot;tensorboard&quot;</code> and <code>&quot;wandb&quot;</code>. 
Use <code>&quot;all&quot;</code> to report to all integrations installed, <code>&quot;none&quot;</code> for no integrations.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.ddp_find_unused_parameters" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.ddp_find_unused_parameters"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ddp_find_unused_parameters</strong> (<code>bool</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>find_unused_parameters</code> passed to <code>DistributedDataParallel</code>. 
Will default to <code>False</code> if gradient checkpointing is used, <code>True</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.ddp_bucket_cap_mb" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.ddp_bucket_cap_mb"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ddp_bucket_cap_mb</strong> (<code>int</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>bucket_cap_mb</code> passed to <code>DistributedDataParallel</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.dataloader_pin_memory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.dataloader_pin_memory"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_pin_memory</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether you want to pin memory in data loaders or not. Will default to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.skip_memory_metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.skip_memory_metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_memory_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to skip adding of memory profiler reports to metrics. This is skipped by default because it slows down the training and evaluation speed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push the model to the Hub every time the model is saved. 
If this is activated, <code>output_dir</code> will begin a git directory synced with the the repo (determined by <code>hub_model_id</code>) and the content will be pushed each time a save is triggered (depending on your <code>save_strategy</code>). Calling <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> will also trigger a push.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If <code>output_dir</code> exists, it needs to be a local clone of the repository to which the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will be pushed.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.resume_from_checkpoint" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.resume_from_checkpoint"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_from_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; The path to a folder with a valid checkpoint for your model. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.hub_model_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.hub_model_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <em>output_dir</em>. 
It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>. Will default to <code>user_name/output_dir_name</code> with <em>output_dir_name</em> being the name of <code>output_dir</code>.</p> <p>Will default to to the name of <code>output_dir</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.hub_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.hub_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_strategy</strong> (<code>str</code> or <code>HubStrategy</code> <em>optional</em>, defaults to <code>&quot;every_save&quot;</code>) &#x2014; Defines the scope of what is pushed to the Hub and when. 
Possible values are:</p> <ul> <li><code>&quot;end&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card when the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> method is called.</li> <li><code>&quot;every_save&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the save are very frequent, a new push is only attempted if the previous one is finished. A last push is made with the final model at the end of training.</li> <li><code>&quot;checkpoint&quot;</code>: like <code>&quot;every_save&quot;</code> but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with <code>trainer.train(resume_from_checkpoint=&quot;last-checkpoint&quot;)</code>.</li> <li><code>&quot;all_checkpoints&quot;</code>: like <code>&quot;checkpoint&quot;</code> but all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.hub_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.hub_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.gradient_checkpointing" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.gradient_checkpointing"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>gradient_checkpointing</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, use gradient checkpointing to save memory at the expense of slower backward pass.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>TrainingArguments is the subset of the arguments we use in our example scripts <strong>which relate to the training loop itself</strong>.</p> <p>Using <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.HfArgumentParser">HfArgumentParser</a> we can turn this class into <a href="https://docs.python.org/3/library/argparse#module-argparse" rel="nofollow">argparse</a> arguments that can be specified on the command line.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.get_process_log_level"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 
11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_process_log_level</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments.get_process_log_level" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.get_process_log_level"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1199" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Returns the log level to be used depending on whether this process is the main process of node 0, main process of node non-0, or a non-main process.</p> 
<p>For the main process the log level defaults to <code>logging.INFO</code> unless overridden by <code>log_level</code> argument.</p> <p>For the replica processes the log level defaults to <code>logging.WARNING</code> unless overridden by <code>log_level_replica</code> argument.</p> <p>The choice between the main and replica process settings is made according to the return value of <code>should_log</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.get_warmup_steps"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_warmup_steps</span></h4><!-- 
HTML_TAG_END --> <a id="transformers.TrainingArguments.get_warmup_steps" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.get_warmup_steps"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1285" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Get number of steps used for a linear warmup.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.main_process_first"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r 
from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>main_process_first</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments.main_process_first" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.main_process_first"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1230" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">local<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">desc<span class="opacity-60"> = &#39;work&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.main_process_first.local" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.main_process_first.local"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; if <code>True</code> first means process of rank 0 of each node if <code>False</code> first means process of rank 0 of node rank 0 In multi-node environment with a shared filesystem you most likely will want to use <code>local=False</code> so that only the main process of the first node will do the processing. If however, the filesystem is not shared, then the main process of each node will need to do the processing, which is the default behavior.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TrainingArguments.main_process_first.desc" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TrainingArguments.main_process_first.desc"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>desc</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;work&quot;</code>) &#x2014; a work description to be used in debug logs<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A context manager for torch distributed environment where on needs to do something on the main process, while blocking replicas, and when it’s finished releasing the replicas.</p> <p>One such use is for <code>datasets</code>’s <code>map</code> feature which to be efficient should be run once on the main process, which upon completion saves a cached version of results and which then automatically gets loaded by the replicas.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.to_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 
21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments.to_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.to_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1294" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Serializes this instance while replace <code>Enum</code> by their values (for JSON serialization support). 
It obfuscates the token values by removing their value.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.to_json_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_json_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments.to_json_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.to_json_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1309" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Serializes this instance to a JSON string.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TrainingArguments.to_sanitized_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" 
fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_sanitized_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.TrainingArguments.to_sanitized_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TrainingArguments.to_sanitized_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args.py#L1315" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> 
<p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Sanitized serialization to use with TensorBoard’s hparams</p></div></div> <h2 class="relative group"><a id="transformers.Seq2SeqTrainingArguments" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Seq2SeqTrainingArguments </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Seq2SeqTrainingArguments"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Seq2SeqTrainingArguments</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Seq2SeqTrainingArguments" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Seq2SeqTrainingArguments"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/training_args_seq2seq.py#L28" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs 
md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_dir<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite_output_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_train<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_predict<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">evaluation_strategy<span class="opacity-60">: IntervalStrategy = &#39;no&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prediction_loss_only<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_device_train_batch_size<span class="opacity-60">: int = 8</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_device_eval_batch_size<span class="opacity-60">: int = 8</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">per_gpu_train_batch_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">per_gpu_eval_batch_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">gradient_accumulation_steps<span class="opacity-60">: int = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_accumulation_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">learning_rate<span class="opacity-60">: float = 5e-05</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_beta1<span class="opacity-60">: float = 0.9</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_beta2<span class="opacity-60">: float = 0.999</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_epsilon<span class="opacity-60">: float = 1e-08</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">max_grad_norm<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_train_epochs<span class="opacity-60">: float = 3.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_steps<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr_scheduler_type<span class="opacity-60">: SchedulerType = &#39;linear&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_ratio<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_steps<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_level<span class="opacity-60">: typing.Optional[str] = &#39;passive&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_level_replica<span class="opacity-60">: typing.Optional[str] = &#39;passive&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">log_on_each_node<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_dir<span class="opacity-60">: typing.Optional[str] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_strategy<span class="opacity-60">: IntervalStrategy = &#39;steps&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_first_step<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_steps<span class="opacity-60">: int = 500</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logging_nan_inf_filter<span class="opacity-60">: str = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_strategy<span class="opacity-60">: IntervalStrategy = &#39;steps&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_steps<span class="opacity-60">: int = 500</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_total_limit<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_on_each_node<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_cuda<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int = 42</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data_seed<span class="opacity-60">: int = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bf16<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_opt_level<span class="opacity-60">: str = &#39;O1&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">half_precision_backend<span class="opacity-60">: str = &#39;auto&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bf16_full_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_full_eval<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tf32<span class="opacity-60">: bool = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">local_rank<span class="opacity-60">: int = -1</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">xpu_backend<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tpu_num_cores<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tpu_metrics_debug<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">debug<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_drop_last<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_steps<span class="opacity-60">: int = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_num_workers<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_index<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">run_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">disable_tqdm<span 
class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">remove_unused_columns<span class="opacity-60">: typing.Optional[bool] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_names<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">load_best_model_at_end<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_for_best_model<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">greater_is_better<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ignore_data_skip<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sharded_ddp<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">deepspeed<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">label_smoothing_factor<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optim<span class="opacity-60">: OptimizerNames = &#39;adamw_hf&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adafactor<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">group_by_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_column_name<span class="opacity-60">: typing.Optional[str] = &#39;length&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">report_to<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ddp_find_unused_parameters<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ddp_bucket_cap_mb<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataloader_pin_memory<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">skip_memory_metrics<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_legacy_prediction_loop<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">resume_from_checkpoint<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_model_id<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_strategy<span class="opacity-60">: HubStrategy = &#39;every_save&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_token<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">gradient_checkpointing<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fp16_backend<span class="opacity-60">: str = &#39;auto&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_model_id<span class="opacity-60">: str = 
None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_organization<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub_token<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mp_parameters<span class="opacity-60">: str = &#39;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sortish_sampler<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">predict_with_generate<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">generation_max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">generation_num_beams<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.output_dir" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.output_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.overwrite_output_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.overwrite_output_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite_output_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If <code>True</code>, overwrite the content of the output directory. Use this to continue training if <code>output_dir</code> points to a checkpoint directory.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.do_train" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.do_train"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_train</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run training or not. 
This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.do_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.do_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_eval</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to run evaluation on the validation set or not. Will be set to <code>True</code> if <code>evaluation_strategy</code> is different from <code>&quot;no&quot;</code>. 
This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.do_predict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.do_predict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_predict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to run predictions on the test set or not. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. 
See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.evaluation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.evaluation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>evaluation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;no&quot;</code>) &#x2014; The evaluation strategy to adopt during training. 
Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No evaluation is done during training.</li> <li><code>&quot;steps&quot;</code>: Evaluation is done (and logged) every <code>eval_steps</code>.</li> <li><code>&quot;epoch&quot;</code>: Evaluation is done at the end of each epoch.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.prediction_loss_only" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.prediction_loss_only"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prediction_loss_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When performing evaluation and generating predictions, only returns the loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.per_device_train_batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.per_device_train_batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>per_device_train_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.per_device_eval_batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.per_device_eval_batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>per_device_eval_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 8) &#x2014; The batch size per GPU/TPU core/CPU for evaluation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.gradient_accumulation_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.gradient_accumulation_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>gradient_accumulation_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Number of updates steps to accumulate the gradients for, before 
performing a backward/update pass.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every <code>gradient_accumulation_steps * xxx_step</code> training examples.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.eval_accumulation_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.eval_accumulation_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_accumulation_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. 
If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but requires more memory).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.learning_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.learning_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>learning_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 5e-5) &#x2014; The initial learning rate for <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Seq2SeqTrainingArguments.weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.adam_beta1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.adam_beta1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 
8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 hyperparameter for the <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.adam_beta2" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.adam_beta2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 hyperparameter for the <a 
href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.adam_epsilon" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.adam_epsilon"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon hyperparameter for the <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.AdamW">AdamW</a> optimizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.max_grad_norm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Seq2SeqTrainingArguments.max_grad_norm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_grad_norm</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Maximum gradient norm (for gradient clipping).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.num_train_epochs(float," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.num_train_epochs(float,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 
0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_train_epochs(<code>float</code>,</strong> <em>optional</em>, defaults to 3.0) &#x2014; Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.max_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.max_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_steps</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; If set to a positive number, the total number of training steps to perform. Overrides <code>num_train_epochs</code>. 
In case of using a finite iterable dataset the training may stop before reaching the set number of steps when all data is exhausted<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.lr_scheduler_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.lr_scheduler_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lr_scheduler_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a>, <em>optional</em>, defaults to <code>&quot;linear&quot;</code>) &#x2014; The scheduler type to use. 
See the documentation of <a href="/docs/transformers/pr_16143/en/main_classes/optimizer_schedules#transformers.SchedulerType">SchedulerType</a> for all possible values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.warmup_ratio" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.warmup_ratio"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; Ratio of total training steps used for a linear warmup from 0 to <code>learning_rate</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.warmup_steps"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of steps used for a linear warmup from 0 to <code>learning_rate</code>. Overrides any effect of <code>warmup_ratio</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.log_level" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.log_level"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_level</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) &#x2014; Logger log level to use on the main process. Possible choices are the log levels as strings: &#x2018;debug&#x2019;, &#x2018;info&#x2019;, &#x2018;warning&#x2019;, &#x2018;error&#x2019; and &#x2018;critical&#x2019;, plus a &#x2018;passive&#x2019; level which doesn&#x2019;t set anything and lets the application set the level.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.log_level_replica" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.log_level_replica"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_level_replica</strong> (<code>str</code>, <em>optional</em>, defaults to <code>passive</code>) 
&#x2014; Logger log level to use on replicas. Same choices as <code>log_level</code>&#x201D;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.log_on_each_node" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.log_on_each_node"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>log_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; In multinode distributed training, whether to log using <code>log_level</code> once per node, or only on the main node.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.logging_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.logging_dir"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_dir</strong> (<code>str</code>, <em>optional</em>) &#x2014; <a href="https://www.tensorflow.org/tensorboard" rel="nofollow">TensorBoard</a> log directory. Will default to *output_dir/runs/<strong>CURRENT_DATETIME_HOSTNAME*</strong>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.logging_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.logging_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 
0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;steps&quot;</code>) &#x2014; The logging strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No logging is done during training.</li> <li><code>&quot;epoch&quot;</code>: Logging is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Logging is done every <code>logging_steps</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.logging_first_step" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.logging_first_step"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_first_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to log and evaluate the first <code>global_step</code> or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.logging_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.logging_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of update steps between two logs if <code>logging_strategy=&quot;steps&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.logging_nan_inf_filter" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.logging_nan_inf_filter"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logging_nan_inf_filter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to filter <code>nan</code> and <code>inf</code> losses for logging. 
If set to <code>True</code> the loss of every step that is <code>nan</code> or <code>inf</code> is filtered and the average loss of the current logging window is taken instead.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p><code>logging_nan_inf_filter</code> only influences the logging of loss values, it does not change the behavior the gradient is computed or applied to the model.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.save_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.save_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, 
defaults to <code>&quot;steps&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: No save is done during training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.save_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.save_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 500) &#x2014; Number of updates steps before two checkpoint saves if <code>save_strategy=&quot;steps&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.save_total_limit" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.save_total_limit"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_total_limit</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a value is passed, will limit the total amount of checkpoints. 
Deletes the older checkpoints in <code>output_dir</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.save_on_each_node" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.save_on_each_node"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_on_each_node</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one.</p> <p>This should not be activated when the different nodes use the same storage as the files will be saved with the same names for each node.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.no_cuda" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.no_cuda"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>no_cuda</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to not use CUDA even when it is available or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.seed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.seed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>seed</strong> (<code>int</code>, <em>optional</em>, defaults to 42) &#x2014; Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the <code>model_init</code> function to instantiate the model if it has some randomly initialized parameters.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.data_seed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.data_seed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data_seed</strong> (<code>int</code>, <em>optional</em>) &#x2014; Random seed to be used with data samplers. If not set, random generators for data sampling will use the same seed as <code>seed</code>. 
This can be used to ensure reproducibility of data sampling, independent of the model seed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.bf16" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.bf16"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bf16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher NVIDIA architecture. 
This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.fp16" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.fp16"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.fp16_opt_level" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.fp16_opt_level"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_opt_level</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;O1&#x2019;) &#x2014; For <code>fp16</code> training, Apex AMP optimization level selected in [&#x2018;O0&#x2019;, &#x2018;O1&#x2019;, &#x2018;O2&#x2019;, and &#x2018;O3&#x2019;]. See details on the <a href="https://nvidia.github.io/apex/amp" rel="nofollow">Apex documentation</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.fp16_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.fp16_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; This argument is deprecated. Use <code>half_precision_backend</code> instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.half_precision_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.half_precision_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>half_precision_backend</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;auto&quot;</code>) &#x2014; The backend to use for mixed precision training. Must be one of <code>&quot;auto&quot;</code>, <code>&quot;amp&quot;</code> or <code>&quot;apex&quot;</code>. 
<code>&quot;auto&quot;</code> will use AMP or APEX depending on the PyTorch version detected, while the other choices will force the requested backend.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.bf16_full_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.bf16_full_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bf16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full bfloat16 evaluation instead of 32-bit. This will be faster and save memory but can harm metric values. 
This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.fp16_full_eval" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.fp16_full_eval"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>fp16_full_eval</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to use full float16 evaluation instead of 32-bit. 
This will be faster and save memory but can harm metric values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.tf32" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.tf32"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tf32</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to enable tf32 mode, available in Ampere and newer GPU architectures. 
This is an experimental API and it may change.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.local_rank" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.local_rank"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local_rank</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Rank of the process during distributed training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.xpu_backend" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.xpu_backend"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>xpu_backend</strong> (<code>str</code>, <em>optional</em>) &#x2014; The backend to use for xpu distributed training. Must be one of <code>&quot;mpi&quot;</code> or <code>&quot;ccl&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.tpu_num_cores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.tpu_num_cores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tpu_num_cores</strong> (<code>int</code>, <em>optional</em>) &#x2014; When training on TPU, the number of TPU cores (automatically passed by launcher script).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.dataloader_drop_last" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.dataloader_drop_last"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_drop_last</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.eval_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.eval_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; Number of update steps between two evaluations if <code>evaluation_strategy=&quot;steps&quot;</code>. 
Will default to the same value as <code>logging_steps</code> if not set.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.dataloader_num_workers" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.dataloader_num_workers"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_num_workers</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of subprocesses to use for data loading (PyTorch only). 
0 means that the data will be loaded in the main process.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.past_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.past_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_index</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; Some models like <a href="../model_doc/transformerxl">TransformerXL</a> or <a href="../model_doc/xlnet">XLNet</a> can make use of the past hidden states for their predictions. 
If this argument is set to a positive int, the <code>Trainer</code> will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument <code>mems</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.run_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.run_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>run_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; A descriptor for the run. 
Typically used for <a href="https://www.wandb.com/" rel="nofollow">wandb</a> and <a href="https://www.mlflow.org/" rel="nofollow">mlflow</a> logging.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.disable_tqdm" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.disable_tqdm"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>disable_tqdm</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not to disable the tqdm progress bars and table of metrics produced by <code>NotebookTrainingTracker</code> in Jupyter Notebooks. 
Will default to <code>True</code> if the logging level is set to warn or lower (default), <code>False</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.remove_unused_columns" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.remove_unused_columns"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>remove_unused_columns</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If using <code>datasets.Dataset</code> datasets, whether or not to automatically remove the columns unused by the model forward method.</p> <p>(Note that this behavior is not implemented for <code>TFTrainer</code>yet.)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.label_names" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.label_names"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_names</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; The list of keys in your dictionary of inputs that correspond to the labels.</p> <p>Will eventually default to <code>[&quot;labels&quot;]</code> except if the model used is one of the <code>XxxForQuestionAnswering</code> in which case it will default to <code>[&quot;start_positions&quot;, &quot;end_positions&quot;]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.load_best_model_at_end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.load_best_model_at_end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>load_best_model_at_end</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to load the best model found during training at the end of training.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"> <p>When set to <code>True</code>, the parameters <code>save_strategy</code> needs to be the same as <code>eval_strategy</code>, and in the case it is &#x201C;steps&#x201D;, <code>save_steps</code> must be a round multiple of <code>eval_steps</code>.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.metric_for_best_model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.metric_for_best_model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_for_best_model</strong> (<code>str</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> to specify the metric to use to compare two different models. Must be the name of a metric returned by the evaluation with or without the prefix <code>&quot;eval_&quot;</code>. Will default to <code>&quot;loss&quot;</code> if unspecified and <code>load_best_model_at_end=True</code> (to use the evaluation loss).</p> <p>If you set this value, <code>greater_is_better</code> will default to <code>True</code>. 
Don&#x2019;t forget to set it to <code>False</code> if your metric is better when lower.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.greater_is_better" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.greater_is_better"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>greater_is_better</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Use in conjunction with <code>load_best_model_at_end</code> and <code>metric_for_best_model</code> to specify if better models should have a greater metric or not. 
Will default to:</p> <ul> <li><code>True</code> if <code>metric_for_best_model</code> is set to a value that isn&#x2019;t <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> <li><code>False</code> if <code>metric_for_best_model</code> is not set, or set to <code>&quot;loss&quot;</code> or <code>&quot;eval_loss&quot;</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.ignore_data_skip" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.ignore_data_skip"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ignore_data_skip</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; When resuming training, whether or not to skip the epochs and batches to get the data loading at the same stage as in the previous training. 
If set to <code>True</code>, the training will begin faster (as that skipping step can take a long time) but will not yield the same results as the interrupted training would have.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.sharded_ddp" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.sharded_ddp"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sharded_ddp</strong> (<code>bool</code>, <code>str</code> or list of <code>ShardedDDPOption</code> <em>optional</em>, defaults to <code>False</code>) &#x2014; Use Sharded DDP training from <a href="https://github.com/facebookresearch/fairscale" rel="nofollow">FairScale</a> (in distributed training only). 
This is an experimental feature.</p> <p>A list of options along the following:</p> <ul> <li><code>&quot;simple&quot;</code>: to use first instance of sharded DDP released by fairscale (<code>ShardedDDP</code>) similar to ZeRO-2.</li> <li><code>&quot;zero_dp_2&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-2 mode (with <code>reshard_after_forward=False</code>).</li> <li><code>&quot;zero_dp_3&quot;</code>: to use the second instance of sharded DPP released by fairscale (<code>FullyShardedDDP</code>) in Zero-3 mode (with <code>reshard_after_forward=True</code>).</li> <li><code>&quot;offload&quot;</code>: to add ZeRO-offload (only compatible with <code>&quot;zero_dp_2&quot;</code> and <code>&quot;zero_dp_3&quot;</code>).</li> </ul> <p>If a string is passed, it will be split on space. If a bool is passed, it will be converted to an empty list for <code>False</code> and <code>[&quot;simple&quot;]</code> for <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.deepspeed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.deepspeed"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 
1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>deepspeed</strong> (<code>str</code> or <code>dict</code>, <em>optional</em>) &#x2014; Use <a href="https://github.com/microsoft/deepspeed" rel="nofollow">Deepspeed</a>. This is an experimental feature and its API may evolve in the future. The value is either the location of DeepSpeed json config file (e.g., <code>ds_config.json</code>) or an already loaded json file as a <code>dict</code>&#x201D;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.label_smoothing_factor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.label_smoothing_factor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_smoothing_factor</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014; The label smoothing factor to 
use. Zero means no label smoothing, otherwise the underlying onehot-encoded labels are changed from 0s and 1s to <code>label_smoothing_factor/num_labels</code> and <code>1 - label_smoothing_factor + label_smoothing_factor/num_labels</code> respectively.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.debug" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.debug"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>debug</strong> (<code>str</code> or list of <code>DebugOption</code> <em>optional</em>, defaults to <code>&quot;&quot;</code>) &#x2014; Enable one or more debug features. 
This is an experimental feature.</p> <p>Possible options are:</p> <ul> <li><code>&quot;underflow_overflow&quot;</code>: detects overflow in model&#x2019;s input/outputs and reports the last frames that led to the event</li> <li><code>&quot;tpu_metrics_debug&quot;</code>: print debug metrics on TPU</li> </ul> <p>The options should be separated by whitespaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.optim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.optim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optim</strong> (<code>str</code> or <code>training_args.OptimizerNames</code> <em>optional</em>, defaults to <code>&quot;adamw_hf&quot;</code>) &#x2014; The optimizer to use: adamw_hf, adamw_torch, adamw_apex_fused, or adafactor.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.adafactor" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.adafactor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adafactor</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; This argument is deprecated. 
Use <code>--optim adafactor</code> instead.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.group_by_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.group_by_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>group_by_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to group together samples of roughly the same length in the training dataset (to minimize padding applied and be more efficient). 
Only useful if applying dynamic padding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.length_column_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.length_column_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>length_column_name</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;length&quot;</code>) &#x2014; Column name for precomputed lengths. If the column exists, grouping by length will use these values rather than computing them on train startup. 
Ignored unless <code>group_by_length</code> is <code>True</code> and the dataset is an instance of <code>Dataset</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.report_to" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.report_to"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>report_to</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>, defaults to <code>&quot;all&quot;</code>) &#x2014; The list of integrations to report the results and logs to. Supported platforms are <code>&quot;azure_ml&quot;</code>, <code>&quot;comet_ml&quot;</code>, <code>&quot;mlflow&quot;</code>, <code>&quot;tensorboard&quot;</code> and <code>&quot;wandb&quot;</code>. 
Use <code>&quot;all&quot;</code> to report to all integrations installed, <code>&quot;none&quot;</code> for no integrations.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.ddp_find_unused_parameters" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.ddp_find_unused_parameters"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ddp_find_unused_parameters</strong> (<code>bool</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>find_unused_parameters</code> passed to <code>DistributedDataParallel</code>. 
Will default to <code>False</code> if gradient checkpointing is used, <code>True</code> otherwise.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.ddp_bucket_cap_mb" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.ddp_bucket_cap_mb"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ddp_bucket_cap_mb</strong> (<code>int</code>, <em>optional</em>) &#x2014; When using distributed training, the value of the flag <code>bucket_cap_mb</code> passed to <code>DistributedDataParallel</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.dataloader_pin_memory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.dataloader_pin_memory"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dataloader_pin_memory</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether you want to pin memory in data loaders or not. Will default to <code>True</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.skip_memory_metrics" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.skip_memory_metrics"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 
0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_memory_metrics</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether to skip adding of memory profiler reports to metrics. This is skipped by default because it slows down the training and evaluation speed.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push the model to the Hub every time the model is saved. 
If this is activated, <code>output_dir</code> will begin a git directory synced with the the repo (determined by <code>hub_model_id</code>) and the content will be pushed each time a save is triggered (depending on your <code>save_strategy</code>). Calling <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> will also trigger a push.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>If <code>output_dir</code> exists, it needs to be a local clone of the repository to which the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will be pushed.</p> </div><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.resume_from_checkpoint" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.resume_from_checkpoint"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_from_checkpoint</strong> (<code>str</code>, <em>optional</em>) &#x2014; The path to a folder with a valid checkpoint for your model. This argument is not directly used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, it&#x2019;s intended to be used by your training/evaluation scripts instead. See the <a href="https://github.com/huggingface/transformers/tree/master/examples" rel="nofollow">example scripts</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.hub_model_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.hub_model_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <em>output_dir</em>. 
It can be a simple model ID in which case the model will be pushed in your namespace. Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>. Will default to <code>user_name/output_dir_name</code> with <em>output_dir_name</em> being the name of <code>output_dir</code>.</p> <p>Will default to to the name of <code>output_dir</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.hub_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.hub_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_strategy</strong> (<code>str</code> or <code>HubStrategy</code> <em>optional</em>, defaults to <code>&quot;every_save&quot;</code>) &#x2014; Defines the scope of what is pushed to the Hub and when. 
Possible values are:</p> <ul> <li><code>&quot;end&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card when the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.save_model">save_model()</a> method is called.</li> <li><code>&quot;every_save&quot;</code>: push the model, its configuration, the tokenizer (if passed along to the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>) and a draft of a model card each time there is a model save. The pushes are asynchronous to not block training, and in case the save are very frequent, a new push is only attempted if the previous one is finished. A last push is made with the final model at the end of training.</li> <li><code>&quot;checkpoint&quot;</code>: like <code>&quot;every_save&quot;</code> but the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with <code>trainer.train(resume_from_checkpoint=&quot;last-checkpoint&quot;)</code>.</li> <li><code>&quot;all_checkpoints&quot;</code>: like <code>&quot;checkpoint&quot;</code> but all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.hub_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.hub_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Seq2SeqTrainingArguments.gradient_checkpointing" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Seq2SeqTrainingArguments.gradient_checkpointing"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>gradient_checkpointing</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; If True, use gradient checkpointing to save memory at the expense of slower backward pass.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>TrainingArguments is the subset of the arguments we use in our example scripts <strong>which relate to the training loop itself</strong>.</p> <p>Using <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.HfArgumentParser">HfArgumentParser</a> we can turn this class into <a href="https://docs.python.org/3/library/argparse#module-argparse" rel="nofollow">argparse</a> arguments that can be specified on the command line.</p> <p>sortish_sampler (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether to use a <em>sortish sampler</em> or not. Only possible if the underlying datasets are <em>Seq2SeqDataset</em> for now but will become generally available in the near future.</p> <p>It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness for the training set. predict_with_generate (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>): Whether to use generate to calculate generative metrics (ROUGE, BLEU). generation_max_length (<code>int</code>, <em>optional</em>): The <code>max_length</code> to use on each evaluation loop when <code>predict_with_generate=True</code>. Will default to the <code>max_length</code> value of the model configuration. generation_num_beams (<code>int</code>, <em>optional</em>): The <code>num_beams</code> to use on each evaluation loop when <code>predict_with_generate=True</code>. 
Will default to the <code>num_beams</code> value of the model configuration.</p></div> <h2 class="relative group"><a id="checkpoints" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#checkpoints"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Checkpoints </span></h2> <p>By default, <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will save all checkpoints in the <code>output_dir</code> you set in the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> you are using. 
Those will go in subfolder named <code>checkpoint-xxx</code> with xxx being the step at which the training was at.</p> <p>Resuming training from a checkpoint can be done when calling <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">Trainer.train()</a> with either:</p> <ul><li><code>resume_from_checkpoint=True</code> which will resume training from the latest checkpoint</li> <li><code>resume_from_checkpoint=checkpoint_dir</code> which will resume training from the specific checkpoint in the directory passed.</li></ul> <p>In addition, you can easily save your checkpoints on the Model Hub when using <code>push_to_hub=True</code>. By default, all the models saved in intermediate checkpoints are saved in different commits, but not the optimizer state. You can adapt the <code>hub-strategy</code> value of your <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> to either:</p> <ul><li><code>&quot;checkpoint&quot;</code>: the latest checkpoint is also pushed in a subfolder named last-checkpoint, allowing you to resume training easily with <code>trainer.train(resume_from_checkpoint=&quot;output_dir/last-checkpoint&quot;)</code>.</li> <li><code>&quot;all_checkpoints&quot;</code>: all checkpoints are pushed like they appear in the output folder (so you will get one checkpoint folder per folder in your final repository)</li></ul> <h2 class="relative group"><a id="logging" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#logging"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Logging </span></h2> <p>By default <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will use <code>logging.INFO</code> for the main process and <code>logging.WARNING</code> for the replicas if any.</p> <p>These defaults can be overridden to use any of the 5 <code>logging</code> levels with <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>’s arguments:</p> <ul><li><code>log_level</code> - for the main process</li> <li><code>log_level_replica</code> - for the replicas</li></ul> <p>Further, if <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>’s <code>log_on_each_node</code> is set to <code>False</code> only the main node will use the log level settings for its main process, all other nodes will use the log level settings for replicas.</p> <p>Note that <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> is going to set <code>transformers</code>’s log level separately for each node in its <code>Trainer.__init__()</code> So you may want to set this sooner (see the next example) if you tap into other <code>transformers</code> functionality before creating the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> object.</p> <p>Here is an example of how this can be used in an application:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button 
class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->[...] 
logger = logging.getLogger(__name__) <span class="hljs-comment"># Setup logging</span> logging.basicConfig( <span class="hljs-built_in">format</span>=<span class="hljs-string">&quot;%(asctime)s - %(levelname)s - %(name)s - %(message)s&quot;</span>, datefmt=<span class="hljs-string">&quot;%m/%d/%Y %H:%M:%S&quot;</span>, handlers=[logging.StreamHandler(sys.stdout)], ) <span class="hljs-comment"># set the main code and the modules it uses to the same log-level according to the node</span> log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) trainer = Trainer(...)<!-- HTML_TAG_END --></pre></div> <p>And then if you only want to see warnings on the main node and all other nodes to not print any most likely duplicated warnings you could run it as:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: 
transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->my_app.py ... --log_level warning --log_level_replica error<!-- HTML_TAG_END --></pre></div> <p>In the multi-node environment if you also don’t want the logs to repeat for each node’s main process, you will want to change the above to:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->my_app.py ... 
--log_level warning --log_level_replica error --log_on_each_node 0<!-- HTML_TAG_END --></pre></div> <p>and then only the main process of the first node will log at the “warning” level, and all other processes on the main node and all processes on other nodes will log at the “error” level.</p> <p>If you need your application to be as quiet as possible you could do:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->my_app.py ... 
--log_level error --log_level_replica error --log_on_each_node 0<!-- HTML_TAG_END --></pre></div> <p>(add <code>--log_on_each_node 0</code> if on multi-node environment)</p> <h2 class="relative group"><a id="randomness" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#randomness"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Randomness </span></h2> <p>When resuming from a checkpoint generated by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> all efforts are made to restore the <em>python</em>, <em>numpy</em> and <em>pytorch</em> RNG states to the same states as they were at the moment of saving that checkpoint, which should make the “stop and resume” style of training as close as possible to non-stop training.</p> <p>However, due to various default non-deterministic pytorch settings this might not fully work. If you want full determinism please refer to <a href="https://pytorch.org/docs/stable/notes/randomness" rel="nofollow">Controlling sources of randomness</a>. 
As explained in the document, that some of those settings that make things deterministic (.e.g., <code>torch.backends.cudnn.deterministic</code>) may slow things down, therefore this can’t be done by default, but you can enable those yourself if needed.</p> <h2 class="relative group"><a id="specific-gpus-selection" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#specific-gpus-selection"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Specific GPUs Selection </span></h2> <p>Let’s discuss how you can tell your program which GPUs are to be used and in what order.</p> <p>When using <a href="https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html" rel="nofollow"><code>DistributedDataParallel</code></a> to use only a subset of your GPUs, you simply specify the number of GPUs to use. 
For example, if you have 4 GPUs, but you wish to use the first 2 you can do:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m torch.distributed.launch --nproc_per_node=2 trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>if you have either <a href="https://github.com/huggingface/accelerate" rel="nofollow"><code>accelerate</code></a> or <a href="https://github.com/microsoft/DeepSpeed" rel="nofollow"><code>deepspeed</code></a> installed you can also accomplish the same by using one of:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" 
aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->accelerate launch --num_processes 2 trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black 
border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed --num_gpus 2 trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>You don’t need to use the Accelerate or <a href="Deepspeed">the Deepspeed integration</a> features to use these launchers.</p> <p>Until now you were able to tell the program how many GPUs to use. Now let’s discuss how to select specific GPUs and control their order.</p> <p>The following environment variables help you control which GPUs to use and their order.</p> <p><strong><code>CUDA_VISIBLE_DEVICES</code></strong></p> <p>If you have multiple GPUs and you’d like to use only 1 or a few of those GPUs, set the environment variable <code>CUDA_VISIBLE_DEVICES</code> to a list of the GPUs to be used.</p> <p>For example, let’s say you have 4 GPUs: 0, 1, 2 and 3. To run only on the physical GPUs 0 and 2, you can do:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 
border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>So now pytorch will see only 2 GPUs, where your physical GPUs 0 and 2 are mapped to <code>cuda:0</code> and <code>cuda:1</code> correspondingly.</p> <p>You can even change their order:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>Here your physical GPUs 0 and 2 are mapped to <code>cuda:1</code> and <code>cuda:0</code> correspondingly.</p> <p>The above examples were all for <code>DistributedDataParallel</code> use pattern, but the 
same method works for <a href="https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html" rel="nofollow"><code>DataParallel</code></a> as well:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>To emulate an environment without GPUs simply set this environment variable to an empty value like so:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES= python trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>As with any environment variable you can, of course, export those instead of adding these to the command line, as in:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 
h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ...<!-- HTML_TAG_END --></pre></div> <p>but this approach can be confusing since you may forget you set up the environment variable earlier and not understand why the wrong GPUs are used. Therefore, it’s a common practice to set the environment variable just for a specific run on the same command line as it’s shown in most examples of this section.</p> <p><strong><code>CUDA_DEVICE_ORDER</code></strong></p> <p>There is an additional environment variable <code>CUDA_DEVICE_ORDER</code> that controls how the physical devices are ordered. The two choices are:</p> <ol><li>ordered by PCIe bus IDs (matches <code>nvidia-smi</code>’s order) - this is the default.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 
border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> CUDA_DEVICE_ORDER=PCI_BUS_ID<!-- HTML_TAG_END --></pre></div> <ol start="2"><li>ordered by GPU compute capabilities</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> CUDA_DEVICE_ORDER=FASTEST_FIRST<!-- HTML_TAG_END --></pre></div> <p>Most of the time you don’t need to care about this environment variable, but it’s very helpful if you have a lopsided setup where you have an old and a new GPUs physically inserted in such a way so that the slow older card appears to be first. One way to fix that is to swap the cards. 
But if you can’t swap the cards (e.g., if the cooling of the devices gets impacted) then setting <code>CUDA_DEVICE_ORDER=FASTEST_FIRST</code> will always put the newer faster card first. It’ll be somewhat confusing though since <code>nvidia-smi</code> will still report them in the PCIe order.</p> <p>The other solution to swapping the order is to use:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> CUDA_VISIBLE_DEVICES=1,0<!-- HTML_TAG_END --></pre></div> <p>In this example we are working with just 2 GPUs, but of course the same would apply to as many GPUs as your computer has.</p> <p>Also if you do set this environment variable it’s the best to set it in your <code>~/.bashrc</code> file or some other startup config file and forget about it.</p> <h2 class="relative group"><a 
id="trainer-integrations" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trainer-integrations"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Trainer Integrations </span></h2> <p>The <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> has been extended to support libraries that may dramatically improve your training time and fit much bigger models.</p> <p>Currently it supports third party solutions, <a href="https://github.com/microsoft/DeepSpeed" rel="nofollow">DeepSpeed</a> and <a href="https://github.com/facebookresearch/fairscale/" rel="nofollow">FairScale</a>, which implement parts of the paper <a href="https://arxiv.org/abs/1910.02054" rel="nofollow">ZeRO: Memory Optimizations Toward Training Trillion Parameter Models, by Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, Yuxiong He</a>.</p> <p>This provided support is new and experimental as of this writing.</p> <a id="zero-install-notes"></a> <h3 class="relative group"><a id="cuda-extension-installation-notes" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#cuda-extension-installation-notes"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>CUDA Extension Installation Notes </span></h3> <p>As of this writing, both FairScale and Deepspeed require compilation of CUDA C++ code, before they can be used.</p> <p>While all installation issues should be dealt with through the corresponding GitHub Issues of <a href="https://github.com/facebookresearch/fairscale/issues" rel="nofollow">FairScale</a> and <a href="https://github.com/microsoft/DeepSpeed/issues" rel="nofollow">Deepspeed</a>, there are a few common issues that one may encounter while building any PyTorch extension that needs to build CUDA extensions.</p> <p>Therefore, if you encounter a CUDA-related build issue while doing one of the following or both:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install fairscale pip install deepspeed<!-- HTML_TAG_END --></pre></div> <p>please, read the following notes first.</p> <p>In these notes we give examples for what to do when <code>pytorch</code> has been built with CUDA <code>10.2</code>. 
If your situation is different remember to adjust the version number to the one you are after.</p> <h4 class="relative group"><a id="possible-problem-1" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#possible-problem-1"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Possible problem #1 </span></h4> <p>While, Pytorch comes with its own CUDA toolkit, to build these two projects you must have an identical version of CUDA installed system-wide.</p> <p>For example, if you installed <code>pytorch</code> with <code>cudatoolkit==10.2</code> in the Python environment, you also need to have CUDA <code>10.2</code> installed system-wide.</p> <p>The exact location may vary from system to system, but <code>/usr/local/cuda-10.2</code> is the most common location on many Unix systems. 
When CUDA is correctly set up and added to the <code>PATH</code> environment variable, one can find the installation location by doing:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">which</span> nvcc<!-- HTML_TAG_END --></pre></div> <p>If you don’t have CUDA installed system-wide, install it first. You will find the instructions by using your favorite search engine. 
For example, if you’re on Ubuntu you may want to search for: <a href="https://www.google.com/search?q=ubuntu+cuda+10.2+install" rel="nofollow">ubuntu cuda 10.2 install</a>.</p> <h4 class="relative group"><a id="possible-problem-2" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#possible-problem-2"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Possible problem #2 </span></h4> <p>Another possible common problem is that you may have more than one CUDA toolkit installed system-wide. 
For example you may have:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->/usr/local/cuda-10.2 /usr/local/cuda-11.0<!-- HTML_TAG_END --></pre></div> <p>Now, in this situation you need to make sure that your <code>PATH</code> and <code>LD_LIBRARY_PATH</code> environment variables contain the correct paths to the desired CUDA version. Typically, package installers will set these to contain whatever the last version was installed. 
If you encounter the problem, where the package build fails because it can’t find the right CUDA version despite you having it installed system-wide, it means that you need to adjust the 2 aforementioned environment variables.</p> <p>First, you may look at their contents:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">echo</span> <span class="hljs-variable">$PATH</span> <span class="hljs-built_in">echo</span> <span class="hljs-variable">$LD_LIBRARY_PATH</span><!-- HTML_TAG_END --></pre></div> <p>so you get an idea of what is inside.</p> <p>It’s possible that <code>LD_LIBRARY_PATH</code> is empty.</p> <p><code>PATH</code> lists the locations of where executables can be found and <code>LD_LIBRARY_PATH</code> is for where shared libraries are to looked for. 
In both cases, earlier entries have priority over the later ones. <code>:</code> is used to separate multiple entries.</p> <p>Now, to tell the build program where to find the specific CUDA toolkit, insert the desired paths to be listed first by doing:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">export</span> PATH=/usr/local/cuda-10.2/bin:<span class="hljs-variable">$PATH</span> <span class="hljs-built_in">export</span> LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:<span class="hljs-variable">$LD_LIBRARY_PATH</span><!-- HTML_TAG_END --></pre></div> <p>Note that we aren’t overwriting the existing values, but prepending instead.</p> <p>Of course, adjust the version number, the full path if need be. Check that the directories you assign actually do exist. 
<code>lib64</code> sub-directory is where the various CUDA <code>.so</code> objects, like <code>libcudart.so</code> reside, it’s unlikely that your system will have it named differently, but if it is adjust it to reflect your reality.</p> <h4 class="relative group"><a id="possible-problem-3" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#possible-problem-3"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Possible problem #3 </span></h4> <p>Some older CUDA versions may refuse to build with newer compilers. For example, you my have <code>gcc-9</code> but it wants <code>gcc-7</code>.</p> <p>There are various ways to go about it.</p> <p>If you can install the latest CUDA toolkit it typically should support the newer compiler.</p> <p>Alternatively, you could install the lower version of the compiler in addition to the one you already have, or you may already have it but it’s not the default one, so the build system can’t see it. 
If you have <code>gcc-7</code> installed but the build system complains it can’t find it, the following might do the trick:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->sudo <span class="hljs-built_in">ln</span> -s /usr/bin/gcc-7 /usr/local/cuda-10.2/bin/gcc sudo <span class="hljs-built_in">ln</span> -s /usr/bin/g++-7 /usr/local/cuda-10.2/bin/g++<!-- HTML_TAG_END --></pre></div> <p>Here, we are making a symlink to <code>gcc-7</code> from <code>/usr/local/cuda-10.2/bin/gcc</code> and since <code>/usr/local/cuda-10.2/bin/</code> should be in the <code>PATH</code> environment variable (see the previous problem’s solution), it should find <code>gcc-7</code> (and <code>g++7</code>) and then the build will succeed.</p> <p>As always make sure to edit the paths in the example to match your situation.</p> <h3 class="relative group"><a 
id="fairscale" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#fairscale"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FairScale </span></h3> <p>By integrating <a href="https://github.com/facebookresearch/fairscale/" rel="nofollow">FairScale</a> the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> provides support for the following features from <a href="https://arxiv.org/abs/1910.02054" rel="nofollow">the ZeRO paper</a>:</p> <ol><li>Optimizer State Sharding</li> <li>Gradient Sharding</li> <li>Model Parameters Sharding (new and very experimental)</li> <li>CPU offload (new and very experimental)</li></ol> <p>You will need at least two GPUs to use this feature.</p> <p><strong>Installation</strong>:</p> <p>Install the library via pypi:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" 
aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install fairscale<!-- HTML_TAG_END --></pre></div> <p>or via <code>transformers</code>’ <code>extras</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 
h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[fairscale]<!-- HTML_TAG_END --></pre></div> <p>(available starting from <code>transformers==4.6.0</code>) or find more details on <a href="https://github.com/facebookresearch/fairscale/#installation" rel="nofollow">the FairScale’s GitHub page</a>.</p> <p>If you’re still struggling with the build, first make sure to read <a href="#zero-install-notes">CUDA Extension Installation Notes</a>.</p> <p>If it’s still not resolved the build issue, here are a few more ideas.</p> <p><code>fairscale</code> seems to have an issue with the recently introduced by pip build isolation feature. If you have a problem with it, you may want to try one of:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> 
Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install fairscale --no-build-isolation .<!-- HTML_TAG_END --></pre></div> <p>or:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->git <span class="hljs-built_in">clone</span> https://github.com/facebookresearch/fairscale/ <span class="hljs-built_in">cd</span> fairscale <span class="hljs-built_in">rm</span> -r dist build python setup.py bdist_wheel pip uninstall -y fairscale pip install dist/fairscale-*.whl<!-- HTML_TAG_END --></pre></div> <p><code>fairscale</code> also has issues with building against pytorch-nightly, so if you use it you may have to try one of:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 
ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip uninstall -y fairscale; pip install fairscale --pre \ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly \ --no-cache --no-build-isolation<!-- HTML_TAG_END --></pre></div> <p>or:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity 
bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install -v --disable-pip-version-check . \ -f https://download.pytorch.org/whl/nightly/cu110/torch_nightly --pre<!-- HTML_TAG_END --></pre></div> <p>Of course, adjust the urls to match the cuda version you use.</p> <p>If after trying everything suggested you still encounter build issues, please, proceed with the GitHub Issue of <a href="https://github.com/facebookresearch/fairscale/issues" rel="nofollow">FairScale</a>.</p> <p><strong>Usage</strong>:</p> <p>To use the first version of Sharded data-parallelism, add <code>--sharded_ddp simple</code> to the command line arguments, and make sure you have added the distributed launcher <code>-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE</code> if you haven’t been using it already.</p> <p>For example here is how you could use it for <code>run_translation.py</code> with 2 GPUs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> 
<div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \ --model_name_or_path t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --overwrite_output_dir \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \ --source_lang en --target_lang ro \ --fp16 --sharded_ddp simple<!-- HTML_TAG_END --></pre></div> <p>Notes:</p> <ul><li>This feature requires distributed training (so multiple GPUs).</li> <li>It is not implemented for TPUs.</li> <li>It works with <code>--fp16</code> too, to make things even faster.</li> <li>One of the main benefits of enabling <code>--sharded_ddp simple</code> is that it uses a lot less GPU memory, so you should be able to use significantly larger batch sizes using the same hardware (e.g. 
3x and even bigger) which should lead to significantly shorter training time.</li></ul> <ol start="3"><li>To use the second version of Sharded data-parallelism, add <code>--sharded_ddp zero_dp_2</code> or <code>--sharded_ddp zero_dp_3</code> to the command line arguments, and make sure you have added the distributed launcher <code>-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE</code> if you haven’t been using it already.</li></ol> <p>For example here is how you could use it for <code>run_translation.py</code> with 2 GPUs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m torch.distributed.launch --nproc_per_node=2 examples/pytorch/translation/run_translation.py \ --model_name_or_path t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --overwrite_output_dir \ --do_train --max_train_samples 
500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \ --source_lang en --target_lang ro \ --fp16 --sharded_ddp zero_dp_2<!-- HTML_TAG_END --></pre></div> <p><code>zero_dp_2</code> is an optimized version of the simple wrapper, while <code>zero_dp_3</code> fully shards model weights, gradients and optimizer states.</p> <p>Both are compatible with adding <code>cpu_offload</code> to enable ZeRO-offload (activate it like this: <code>--sharded_ddp &quot;zero_dp_2 cpu_offload&quot;</code>).</p> <p>Notes:</p> <ul><li>This feature requires distributed training (so multiple GPUs).</li> <li>It is not implemented for TPUs.</li> <li>It works with <code>--fp16</code> too, to make things even faster.</li> <li>The <code>cpu_offload</code> additional option requires <code>--fp16</code>.</li> <li>This is an area of active development, so make sure you have a source install of fairscale to use this feature as some bugs you encounter may have been fixed there already.</li></ul> <p>Known caveats:</p> <ul><li>This feature is incompatible with <code>--predict_with_generate</code> in the <em>run_translation.py</em> script.</li> <li>Using <code>--sharded_ddp zero_dp_3</code> requires wrapping each layer of the model in the special container <code>FullyShardedDataParallelism</code> of fairscale. 
It should be used with the option <code>auto_wrap</code> if you are not doing this yourself: <code>--sharded_ddp &quot;zero_dp_3 auto_wrap&quot;</code>.</li></ul> <p>Sections that were moved:</p> <p>[ <a href="./deepspeed#deepspeed-trainer-integration">DeepSpeed</a><a id="deepspeed"></a> | <a href="./deepspeed#deepspeed-installation">Installation</a><a id="installation"></a> | <a href="./deepspeed#deepspeed-multi-gpu">Deployment with multiple GPUs</a><a id="deployment-with-multiple-gpus"></a> | <a href="./deepspeed#deepspeed-one-gpu">Deployment with one GPU</a><a id="deployment-with-one-gpu"></a> | <a href="./deepspeed#deepspeed-notebook">Deployment in Notebooks</a><a id="deployment-in-notebooks"></a> | <a href="./deepspeed#deepspeed-config">Configuration</a><a id="configuration"></a> | <a href="./deepspeed#deepspeed-config-passing">Passing Configuration</a><a id="passing-configuration"></a> | <a href="./deepspeed#deepspeed-config-shared">Shared Configuration</a><a id="shared-configuration"></a> | <a href="./deepspeed#deepspeed-zero">ZeRO</a><a id="zero"></a> | <a href="./deepspeed#deepspeed-zero2-config">ZeRO-2 Config</a><a id="zero-2-config"></a> | <a href="./deepspeed#deepspeed-zero3-config">ZeRO-3 Config</a><a id="zero-3-config"></a> | <a href="./deepspeed#deepspeed-nvme">NVMe Support</a><a id="nvme-support"></a> | <a href="./deepspeed#deepspeed-zero2-zero3-performance">ZeRO-2 vs ZeRO-3 Performance</a><a id="zero-2-vs-zero-3-performance"></a> | <a href="./deepspeed#deepspeed-zero2-example">ZeRO-2 Example</a><a id="zero-2-example"></a> | <a href="./deepspeed#deepspeed-zero3-example">ZeRO-3 Example</a><a id="zero-3-example"></a> | <a href="./deepspeed#deepspeed-optimizer">Optimizer</a><a id="optimizer"></a> | <a href="./deepspeed#deepspeed-scheduler">Scheduler</a><a id="scheduler"></a> | <a href="./deepspeed#deepspeed-fp32">fp32 Precision</a><a id="fp32-precision"></a> | <a href="./deepspeed#deepspeed-amp">Automatic Mixed Precision</a><a 
id="automatic-mixed-precision"></a> | <a href="./deepspeed#deepspeed-bs">Batch Size</a><a id="batch-size"></a> | <a href="./deepspeed#deepspeed-grad-acc">Gradient Accumulation</a><a id="gradient-accumulation"></a> | <a href="./deepspeed#deepspeed-grad-clip">Gradient Clipping</a><a id="gradient-clipping"></a> | <a href="./deepspeed#deepspeed-weight-extraction">Getting The Model Weights Out</a><a id="getting-the-model-weights-out"></a> ]</p> <script type="module" data-hydrate="ij1589"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="ij1589"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/trainer.mdx-a51a0aac.js") ], params: {} } }); </script>
432
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/tokenizer.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;tokenizer&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.PreTrainedTokenizer&quot;,&quot;title&quot;:&quot;PreTrainedTokenizer&quot;},{&quot;local&quot;:&quot;transformers.PreTrainedTokenizerFast&quot;,&quot;title&quot;:&quot;PreTrainedTokenizerFast&quot;},{&quot;local&quot;:&quot;transformers.BatchEncoding&quot;,&quot;title&quot;:&quot;BatchEncoding&quot;}],&quot;title&quot;:&quot;Tokenizer&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/tokenizer.mdx-187685a5.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="tokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#tokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Tokenizer </span></h1> <p>A tokenizer is in charge of preparing the inputs for a model. The library contains tokenizers for all the models. Most of the tokenizers are available in two flavors: a full python implementation and a “Fast” implementation based on the Rust library <a href="https://github.com/huggingface/tokenizers" rel="nofollow">🤗 Tokenizers</a>. The “Fast” implementations allows:</p> <ol><li>a significant speed-up in particular when doing batched tokenization and</li> <li>additional methods to map between the original string (character and words) and the token space (e.g. getting the index of the token comprising a given character or the span of characters corresponding to a given token). 
Currently no “Fast” implementation is available for the SentencePiece-based tokenizers (for T5, ALBERT, CamemBERT, XLM-RoBERTa and XLNet models).</li></ol> <p>The base classes <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> and <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> implement the common methods for encoding string inputs in model inputs (see below) and instantiating/saving python and “Fast” tokenizers either from a local file or directory or from a pretrained tokenizer provided by the library (downloaded from HuggingFace’s AWS S3 repository). They both rely on <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a> that contains the common methods, and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.SpecialTokensMixin">SpecialTokensMixin</a>.</p> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> and <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> thus implement the main methods for using all the tokenizers:</p> <ul><li>Tokenizing (splitting strings in sub-word token strings), converting tokens strings to ids and back, and encoding/decoding (i.e., tokenizing and converting to integers).</li> <li>Adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece…).</li> <li>Managing special tokens (like mask, beginning-of-sentence, etc.): adding them, assigning them to attributes in the tokenizer for easy access and making sure they are not split during tokenization.</li></ul> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a> holds the 
output of the <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a>’s encoding methods (<code>__call__</code>, <code>encode_plus</code> and <code>batch_encode_plus</code>) and is derived from a Python dictionary. When the tokenizer is a pure python tokenizer, this class behaves just like a standard python dictionary and holds the various model inputs computed by these methods (<code>input_ids</code>, <code>attention_mask</code>…). When the tokenizer is a “Fast” tokenizer (i.e., backed by HuggingFace <a href="https://github.com/huggingface/tokenizers" rel="nofollow">tokenizers library</a>), this class provides in addition several advanced alignment methods which can be used to map between the original string (character and words) and the token space (e.g., getting the index of the token comprising a given character or the span of characters corresponding to a given token).</p> <h2 class="relative group"><a id="transformers.PreTrainedTokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>PreTrainedTokenizer </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PreTrainedTokenizer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils.py#L333" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.model_max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.model_max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.padding_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.padding_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.truncation_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.truncation_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have truncation applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.model_input_names" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.model_input_names"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. 
Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. 
Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. 
Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). 
Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. 
Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). 
Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). 
Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.additional_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.additional_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. Add them here to ensure they won&#x2019;t be split by the tokenization process. 
Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for all slow tokenizers.</p> <p>Inherits from <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a>.</p> <p>Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.</p> <p>This class also contain the added tokens in a unified way on top of all tokenizers so we don’t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece…).</p> <p>Class attributes (overridden by derived classes)</p> <ul><li><strong>vocab_files_names</strong> (<code>Dict[str, str]</code>) — A dictionary with, as keys, the <code>__init__</code> keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).</li> <li><strong>pretrained_vocab_files_map</strong> (<code>Dict[str, Dict[str, str]]</code>) — A dictionary of dictionaries, with the high-level keys being the <code>__init__</code> keyword name of each vocabulary file required by the model, the low-level being the <code>short-cut-names</code> of the pretrained models with, as associated values, the <code>url</code> to the associated pretrained vocabulary file.</li> <li><strong>max_model_input_sizes</strong> (<code>Dict[str, Optional[int]]</code>) — A dictionary with, as keys, the <code>short-cut-names</code> of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or <code>None</code> if the model has no maximum input size.</li> <li><strong>pretrained_init_configuration</strong> (<code>Dict[str, Dict[str, Any]]</code>) — A dictionary with, as keys, the 
<code>short-cut-names</code> of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the <code>__init__</code> method of the tokenizer class for this pretrained model when loading the tokenizer with the <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a> method.</li> <li><strong>model_input_names</strong> (<code>List[str]</code>) — A list of inputs expected in the forward pass of the model.</li> <li><strong>padding_side</strong> (<code>str</code>) — The default value for the side on which the model should have padding applied. Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li> <li><strong>truncation_side</strong> (<code>str</code>) — The default value for the side on which the model should have truncation applied. Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 
19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2379" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span 
class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerBase.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> — Number of tokens 
truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.batch_decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 
14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>batch_decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.batch_decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.batch_decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3250" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 
8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.batch_decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of decoded sentences.</p> <!-- HTML_TAG_END --></p></div></div> <p>Convert a list of lists of token ids into a list of strings by calling decode.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 
15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3283" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids<span class="opacity-60">: typing.Union[int, typing.List[int], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.token_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.token_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 
0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The decoded sentence.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.</p> <p>Similar to doing <code>self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 
12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.encode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2189" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerBase.encode.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.encode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The tokenized ids of the text.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.</p> <p>Same as doing <code>self.convert_tokens_to_ids(self.tokenize(text))</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.PushToHubMixin.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 
13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.PushToHubMixin.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.PushToHubMixin.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_path_or_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_url<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">organization<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center 
font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your tokenizer in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). 
If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_url" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_url"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. 
If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. 
This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add tokenizer&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.organization" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.organization"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your tokenizer (you must be a member of this organization).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.file_utils.PushToHubMixin.push_to_hub.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The url of the commit of your tokenizer in the given repository.</p> <!-- HTML_TAG_END --></p></div></div> <p>Upload the tokenizer files to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group 
flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.convert_ids_to_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_ids_to_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.convert_ids_to_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.convert_ids_to_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils.py#L881" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ids<span class="opacity-60">: typing.Union[int, typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code> or <code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.convert_ids_to_tokens.ids" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.convert_ids_to_tokens.ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ids</strong> (<code>int</code> or <code>List[int]</code>) &#x2014; The token id (or token ids) to convert to tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.convert_ids_to_tokens.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.convert_ids_to_tokens.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 
0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizer.convert_ids_to_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code> or <code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The decoded token(s).</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.convert_tokens_to_ids"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 
27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_tokens_to_ids</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.convert_tokens_to_ids" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.convert_tokens_to_ids"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils.py#L560" target="_blank"><span>&lt;</span> 
<span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokens<span class="opacity-60">: typing.Union[str, typing.List[str]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code> or <code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.convert_tokens_to_ids.tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.convert_tokens_to_ids.tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokens</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several token(s) to convert to token id(s).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizer.convert_tokens_to_ids.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code> or <code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The token id or list of token ids.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.get_added_vocab"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 
11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_added_vocab</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.get_added_vocab" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.get_added_vocab"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils.py#L369" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, int]</code></span><!-- HTML_TAG_END 
--></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizer.get_added_vocab.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The added tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Returns the added tokens in the vocabulary as a dictionary of token to index.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.num_special_tokens_to_add"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 
9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>num_special_tokens_to_add</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.num_special_tokens_to_add" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.num_special_tokens_to_add"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils.py#L458" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pair<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative 
docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.num_special_tokens_to_add.pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.num_special_tokens_to_add.pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pair</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizer.num_special_tokens_to_add.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Number of special tokens added to sequences.</p> <!-- HTML_TAG_END --></p></div></div> <p>Returns the number of added tokens when encoding a sequence with special tokens.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.prepare_for_tokenization"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 
7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prepare_for_tokenization</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.prepare_for_tokenization" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.prepare_for_tokenization"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils.py#L819" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> 
</span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Tuple[str, Dict[str, Any]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.prepare_for_tokenization.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.prepare_for_tokenization.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>) &#x2014; The text to prepare.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.prepare_for_tokenization.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.prepare_for_tokenization.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. 
kwargs &#x2014; Keyword arguments to use for the tokenization.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizer.prepare_for_tokenization.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Tuple[str, Dict[str, Any]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The prepared text and the unused kwargs.</p> <!-- HTML_TAG_END --></p></div></div> <p>Performs any necessary transformations before tokenization.</p> <p>This method should pop the arguments from kwargs and return the remaining <code>kwargs</code> as well. We test the <code>kwargs</code> at the end of the encoding process to be sure all the arguments have been used.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizer.tokenize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 
11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tokenize</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizer.tokenize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizer.tokenize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils.py#L481" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: str</span></span> </span><span 
class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizer.tokenize.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.tokenize.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>) &#x2014; The sequence to be encoded.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.PreTrainedTokenizer.tokenize.*kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizer.tokenize.*kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START -->*<strong>*kwargs</strong> (additional keyword arguments) &#x2014; Passed along to the model-specific <code>prepare_for_tokenization</code> preprocessing method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizer.tokenize.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a string in a sequence of tokens, using the tokenizer.</p> <p>Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). 
Takes care of added tokens.</p></div></div> <h2 class="relative group"><a id="transformers.PreTrainedTokenizerFast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PreTrainedTokenizerFast </span></h2> <p>The <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> depend on the <a href="https://huggingface.co/docs/tokenizers" rel="nofollow">tokenizers</a> library. The tokenizers obtained from the 🤗 tokenizers library can be loaded very simply into 🤗 transformers. 
Take a look at the <a href="../fast_tokenizers">Using tokenizers from 🤗 tokenizers</a> page to understand how this is done.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PreTrainedTokenizerFast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_fast.py#L77" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.model_max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.model_max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). 
If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.padding_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.padding_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.truncation_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.truncation_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have truncation applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.model_input_names" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.model_input_names"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. 
Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. 
Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. 
Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). 
Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. 
Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). 
Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). 
Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.additional_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.additional_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. Add them here to ensure they won&#x2019;t be split by the tokenization process. 
Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.tokenizer_object" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.tokenizer_object"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer_object</strong> (<code>tokenizers.Tokenizer</code>) &#x2014; A <code>tokenizers.Tokenizer</code> object from &#x1F917; tokenizers to instantiate from. 
See <a href="../fast_tokenizers">Using tokenizers from &#x1F917; tokenizers</a> for more information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.tokenizer_file" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.tokenizer_file"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer_file</strong> (<code>str</code>) &#x2014; A path to a local JSON file representing a previously serialized <code>tokenizers.Tokenizer</code> object from &#x1F917; tokenizers.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for all fast tokenizers (wrapping HuggingFace tokenizers library).</p> <p>Inherits from <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a>.</p> <p>Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers, as well as adding tokens to the 
vocabulary.</p> <p>This class also contains the added tokens in a unified way on top of all tokenizers so we don’t have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece…).</p> <p>Class attributes (overridden by derived classes)</p> <ul><li><strong>vocab_files_names</strong> (<code>Dict[str, str]</code>) — A dictionary with, as keys, the <code>__init__</code> keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).</li> <li><strong>pretrained_vocab_files_map</strong> (<code>Dict[str, Dict[str, str]]</code>) — A dictionary of dictionaries, with the high-level keys being the <code>__init__</code> keyword name of each vocabulary file required by the model, the low-level being the <code>short-cut-names</code> of the pretrained models with, as associated values, the <code>url</code> to the associated pretrained vocabulary file.</li> <li><strong>max_model_input_sizes</strong> (<code>Dict[str, Optional[int]]</code>) — A dictionary with, as keys, the <code>short-cut-names</code> of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or <code>None</code> if the model has no maximum input size.</li> <li><strong>pretrained_init_configuration</strong> (<code>Dict[str, Dict[str, Any]]</code>) — A dictionary with, as keys, the <code>short-cut-names</code> of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the <code>__init__</code> method of the tokenizer class for this pretrained model when loading the tokenizer with the <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a> method.</li> <li><strong>model_input_names</strong> (<code>List[str]</code>) — A list of inputs expected in the forward pass of the model.</li> 
<li><strong>padding_side</strong> (<code>str</code>) — The default value for the side on which the model should have padding applied. Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li> <li><strong>truncation_side</strong> (<code>str</code>) — The default value for the side on which the model should have truncation applied. Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- 
HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2379" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 
my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerBase.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> — Number of tokens 
truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.batch_decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 
14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>batch_decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.batch_decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.batch_decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3250" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 
8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.batch_decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of decoded sentences.</p> <!-- HTML_TAG_END --></p></div></div> <p>Convert a list of lists of token ids into a list of strings by calling decode.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 
15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3283" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids<span class="opacity-60">: typing.Union[int, typing.List[int], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.token_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.token_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 
0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The decoded sentence.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.</p> <p>Similar to doing <code>self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 
12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.encode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2189" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerBase.encode.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.encode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The tokenized ids of the text.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.</p> <p>Same as doing <code>self.convert_tokens_to_ids(self.tokenize(text))</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.PushToHubMixin.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 
13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.PushToHubMixin.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.PushToHubMixin.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_path_or_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_url<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">organization<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center 
font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your tokenizer in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). 
If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_url" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_url"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. 
If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. 
This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add tokenizer&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.organization" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.organization"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your tokenizer (you must be a member of this organization).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.file_utils.PushToHubMixin.push_to_hub.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The url of the commit of your tokenizer in the given repository.</p> <!-- HTML_TAG_END --></p></div></div> <p>Upload the tokenizer files to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group 
flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.convert_ids_to_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_ids_to_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.convert_ids_to_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.convert_ids_to_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 
8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_fast.py#L292" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ids<span class="opacity-60">: typing.Union[int, typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code> or <code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.ids" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ids</strong> (<code>int</code> or <code>List[int]</code>) &#x2014; The token id (or token ids) to convert to tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerFast.convert_ids_to_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code> or <code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The decoded token(s).</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.convert_tokens_to_ids"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 
26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_tokens_to_ids</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.convert_tokens_to_ids" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.convert_tokens_to_ids"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_fast.py#L234" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokens<span class="opacity-60">: typing.Union[str, typing.List[str]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code> or <code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.convert_tokens_to_ids.tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.convert_tokens_to_ids.tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokens</strong> (<code>str</code> or <code>List[str]</code>) &#x2014; One or several token(s) to convert to token id(s).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerFast.convert_tokens_to_ids.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code> or <code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The token id or list of token ids.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.get_added_vocab"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 
12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_added_vocab</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.get_added_vocab" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.get_added_vocab"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_fast.py#L155" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span 
class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerFast.get_added_vocab.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The added tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Returns the added tokens in the vocabulary as a dictionary of token to index.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.num_special_tokens_to_add"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 
22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>num_special_tokens_to_add</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.num_special_tokens_to_add" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.num_special_tokens_to_add"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_fast.py#L271" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pair<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span 
class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.num_special_tokens_to_add.pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.num_special_tokens_to_add.pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pair</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" 
id="transformers.PreTrainedTokenizerFast.num_special_tokens_to_add.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Number of special tokens added to sequences.</p> <!-- HTML_TAG_END --></p></div></div> <p>Returns the number of added tokens when encoding a sequence with special tokens.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 
21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>set_truncation_and_padding</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_fast.py#L321" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">padding_strategy<span class="opacity-60">: PaddingStrategy</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation_strategy<span class="opacity-60">: TruncationStrategy</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding.padding_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding.padding_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding_strategy</strong> (<a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>) &#x2014; The kind of padding that will be applied to the input<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding.truncation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding.truncation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation_strategy</strong> (<a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>) &#x2014; The kind of truncation that will be applied to the input<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>) &#x2014; The maximum size of a sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>) &#x2014; The stride to use when handling overflow.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.set_truncation_and_padding.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.set_truncation_and_padding.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards.</p> <p>The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed section.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerFast.train_new_from_iterator"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 
12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>train_new_from_iterator</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerFast.train_new_from_iterator" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerFast.train_new_from_iterator"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_fast.py#L604" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_iterator<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">vocab_size<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">new_special_tokens<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_map<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast" >PreTrainedTokenizerFast</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.train_new_from_iterator.text_iterator" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.train_new_from_iterator.text_iterator"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_iterator</strong> (generator of <code>List[str]</code>) &#x2014; The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts if you have everything in memory.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.train_new_from_iterator.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.train_new_from_iterator.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 
1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>) &#x2014; The size of the vocabulary you want for your tokenizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerFast.train_new_from_iterator.new_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.train_new_from_iterator.new_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>new_special_tokens</strong> (list of <code>str</code> or <code>AddedToken</code>, <em>optional</em>) &#x2014; A list of new special tokens to add to the tokenizer you are training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.PreTrainedTokenizerFast.train_new_from_iterator.special_tokens_map" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerFast.train_new_from_iterator.special_tokens_map"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>special_tokens_map</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; If you want to rename some of the special tokens this tokenizer uses, pass along a mapping old special token name to new special token name in this argument. 
kwargs &#x2014; Additional keyword arguments passed along to the trainer from the &#x1F917; Tokenizers library.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerFast.train_new_from_iterator.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast" >PreTrainedTokenizerFast</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A new tokenizer of the same type as the original one, trained on <code>text_iterator</code>.</p> <!-- HTML_TAG_END --></p></div></div> <p>Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline) as the current one.</p></div></div> <h2 class="relative group"><a id="transformers.BatchEncoding" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span>BatchEncoding </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BatchEncoding</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 
0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L167" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60">: typing.Union[typing.Dict[str, typing.Any], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoding<span class="opacity-60">: typing.Union[tokenizers.Encoding, typing.Sequence[tokenizers.Encoding], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tensor_type<span class="opacity-60">: typing.Union[NoneType, str, transformers.file_utils.TensorType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prepend_batch_axis<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">n_sequences<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex 
items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.data" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>dict</code>) &#x2014; Dictionary of lists/arrays/tensors returned by the encode/batch_encode methods (&#x2018;input_ids&#x2019;, &#x2018;attention_mask&#x2019;, etc.).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.encoding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.encoding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoding</strong> (<code>tokenizers.Encoding</code> or <code>Sequence[tokenizers.Encoding]</code>, <em>optional</em>) &#x2014; If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character space to token space the <code>tokenizers.Encoding</code> instance or list of instance (for batches) hold this information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.tensor_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.tensor_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tensor_type</strong> (<code>Union[None, str, TensorType]</code>, <em>optional</em>) &#x2014; You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.prepend_batch_axis" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.prepend_batch_axis"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prepend_batch_axis</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add a batch axis when converting to tensors (see <code>tensor_type</code> above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group 
flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.n_sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.n_sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>n_sequences</strong> (<code>Optional[int]</code>, <em>optional</em>) &#x2014; You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Holds the output of the <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode_plus">encode_plus()</a> and <code>batch_encode</code> methods (tokens, attention_masks, etc).</p> <p>This class is derived from a python dictionary and can be used as a dictionary. 
In addition, this class exposes utility methods to map from word/character space to token space.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.char_to_token"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>char_to_token</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.char_to_token" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.char_to_token"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L523" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_char_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">char_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequence_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_token.batch_or_char_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_token.batch_or_char_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_char_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprise one sequence, this can be the index of the word in the sequence<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_token.char_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_token.char_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>char_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_token.sequence_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_token.sequence_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchEncoding.char_to_token.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Index of the token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Get the index of the token in the encoded output comprising a character in the original string for a sequence of the batch.</p> <p>Can be called as:</p> <ul><li><code>self.char_to_token(char_index)</code> if batch size is 1</li> <li><code>self.char_to_token(batch_index, char_index)</code> if batch size is greater or equal to 1</li></ul> <p>This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.char_to_word"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>char_to_word</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.char_to_word" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.char_to_word"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L609" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_char_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">char_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequence_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code> or <code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto 
border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_word.batch_or_char_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_word.batch_or_char_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_char_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprise one sequence, this can be the index of the character in the original string.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_word.char_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_word.char_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>char_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the character in the original string.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.char_to_word.sequence_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.char_to_word.sequence_index"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchEncoding.char_to_word.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code> or <code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Index or indices of the associated encoded token(s).</p> <!-- HTML_TAG_END --></p></div></div> <p>Get the word in the original string corresponding to a character in the original string of a sequence of the batch.</p> <p>Can be called as:</p> <ul><li><code>self.char_to_word(char_index)</code> if batch size is 1</li> <li><code>self.char_to_word(batch_index, char_index)</code> if batch size is greater than 1</li></ul> <p>This method is particularly suited when the input sequences are provided as pre-tokenized sequences 
(i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.convert_to_tensors"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_to_tensors</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.convert_to_tensors" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.convert_to_tensors"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L648" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tensor_type<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prepend_batch_axis<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.convert_to_tensors.tensor_type" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.convert_to_tensors.tensor_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tensor_type</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; The type of tensors to use. If <code>str</code>, should be one of the values of the enum <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>. 
If <code>None</code>, no modification is done.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.convert_to_tensors.prepend_batch_axis" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.convert_to_tensors.prepend_batch_axis"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>prepend_batch_axis</strong> (<code>int</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add the batch dimension during the conversion.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Convert the inner content to tensors.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.sequence_ids"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg 
width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>sequence_ids</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.sequence_ids" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.sequence_ids"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L301" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[Optional[int]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.sequence_ids.batch_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.sequence_ids.batch_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchEncoding.sequence_ids.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[Optional[int]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list indicating the sequence id corresponding to each token. Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding sequence.</p> <!-- HTML_TAG_END --></p></div></div> <p>Return a list mapping the tokens to the id of their original sentences:</p> <ul><li><code>None</code> for special tokens added around or between sequences,</li> <li><code>0</code> for tokens corresponding to words in the first sequence,</li> <li><code>1</code> for tokens corresponding to words in the second sequence when a pair of sequences was jointly encoded.</li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.to"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 
inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.to" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.to"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L731" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: typing.Union[str, ForwardRef(&#39;torch.device&#39;)]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.to.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.to.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 
0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>str</code> or <code>torch.device</code>) &#x2014; The device to put the tensors on.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchEncoding.to.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The same instance after modification.</p> <!-- HTML_TAG_END --></p></div></div> <p>Send all values to device by calling <code>v.to(device)</code> (PyTorch only).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.token_to_chars"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" 
d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>token_to_chars</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.token_to_chars" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.token_to_chars"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L487" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_token_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.CharSpan" >CharSpan</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_chars.batch_or_token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_chars.batch_or_token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_token_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_chars.token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_chars.token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token or tokens in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchEncoding.token_to_chars.returns"><p 
class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.CharSpan" >CharSpan</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Span of characters in the original string.</p> <!-- HTML_TAG_END --></p></div></div> <p>Get the character span corresponding to an encoded token in a sequence of the batch.</p> <p>Character spans are returned as a <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.CharSpan">CharSpan</a> with:</p> <ul><li><strong>start</strong> — Index of the first character in the original string associated to the token.</li> <li><strong>end</strong> — Index of the character following the last character in the original string associated to the token.</li></ul> <p>Can be called as:</p> <ul><li><code>self.token_to_chars(token_index)</code> if batch size is 1</li> <li><code>self.token_to_chars(batch_index, token_index)</code> if batch size is greater or equal to 1</li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.token_to_sequence"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path 
fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>token_to_sequence</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.token_to_sequence" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.token_to_sequence"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L359" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs 
md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_token_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_sequence.batch_or_token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_sequence.batch_or_token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_token_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the token in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_sequence.token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_sequence.token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchEncoding.token_to_sequence.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> 
<p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Index of the word in the input sequence.</p> <!-- HTML_TAG_END --></p></div></div> <p>Get the index of the sequence represented by the given token. In the general use case, this method returns <code>0</code> for a single sequence or the first sequence of a pair, and <code>1</code> for the second sequence of a pair</p> <p>Can be called as:</p> <ul><li><code>self.token_to_sequence(token_index)</code> if batch size is 1</li> <li><code>self.token_to_sequence(batch_index, token_index)</code> if batch size is greater than 1</li></ul> <p>This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.token_to_word"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 
20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>token_to_word</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.token_to_word" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.token_to_word"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L398" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">batch_or_token_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_word.batch_or_token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_word.batch_or_token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_token_index</strong> (<code>int</code>) 
&#x2014; Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.token_to_word.token_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.token_to_word.token_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the token in the sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchEncoding.token_to_word.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Index of the word 
in the input sequence.</p> <!-- HTML_TAG_END --></p></div></div> <p>Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch.</p> <p>Can be called as:</p> <ul><li><code>self.token_to_word(token_index)</code> if batch size is 1</li> <li><code>self.token_to_word(batch_index, token_index)</code> if batch size is greater than 1</li></ul> <p>This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 
10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L286" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.tokens.batch_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.tokens.batch_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchEncoding.tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of tokens at that index.</p> <!-- HTML_TAG_END --></p></div></div> <p>Return the list of tokens (sub-parts of the input strings after 
word/subword splitting and before conversion to integer indices) at a given batch index (only works for the output of a fast tokenizer).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.word_ids"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>word_ids</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.word_ids" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.word_ids"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L343" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[Optional[int]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_ids.batch_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.BatchEncoding.word_ids.batch_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchEncoding.word_ids.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[Optional[int]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list indicating the word corresponding to each token. 
Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word).</p> <!-- HTML_TAG_END --></p></div></div> <p>Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.word_to_chars"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>word_to_chars</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.BatchEncoding.word_to_chars" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.word_to_chars"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L564" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_word_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">word_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequence_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span 
class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>CharSpan</code> or <code>List[CharSpan]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_chars.batch_or_word_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_chars.batch_or_word_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_word_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprise one sequence, this can be the index of the word in the sequence<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_chars.word_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_chars.word_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>word_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_chars.sequence_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_chars.sequence_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchEncoding.word_to_chars.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>CharSpan</code> or <code>List[CharSpan]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Span(s) of the associated character or characters in the string. 
CharSpan are NamedTuple with:</p> <ul> <li>start: index of the first character associated to the token in the original string</li> <li>end: index of the character following the last character associated to the token in the original string</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Get the character span in the original string corresponding to given word in a sequence of the batch.</p> <p>Character spans are returned as a CharSpan NamedTuple with:</p> <ul><li>start: index of the first character in the original string</li> <li>end: index of the character following the last character in the original string</li></ul> <p>Can be called as:</p> <ul><li><code>self.word_to_chars(word_index)</code> if batch size is 1</li> <li><code>self.word_to_chars(batch_index, word_index)</code> if batch size is greater or equal to 1</li></ul></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.word_to_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 
11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>word_to_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.word_to_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.word_to_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L436" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_or_word_index<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">word_index<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequence_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_tokens.batch_or_word_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_tokens.batch_or_word_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_or_word_index</strong> (<code>int</code>) &#x2014; Index of the sequence in the batch. 
If the batch only comprises one sequence, this can be the index of the word in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_tokens.word_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_tokens.word_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>word_index</strong> (<code>int</code>, <em>optional</em>) &#x2014; If a batch index is provided in <em>batch_or_token_index</em>, this can be the index of the word in the sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.word_to_tokens.sequence_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.word_to_tokens.sequence_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequence_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Get the encoded token span corresponding to a word in a sequence of the batch.</p> <p>Token spans are returned as a <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.TokenSpan">TokenSpan</a> with:</p> <ul><li><strong>start</strong> — Index of the first token.</li> <li><strong>end</strong> — Index of the token following the last token.</li></ul> <p>Can be called as:</p> <ul><li><code>self.word_to_tokens(word_index, sequence_index: int = 0)</code> if batch size is 1</li> <li><code>self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)</code> if batch size is greater or equal to 1</li></ul> <p>This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). 
In this case it allows to easily associate encoded tokens with provided tokenized words.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BatchEncoding.words"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>words</span></h4><!-- HTML_TAG_END --> <a id="transformers.BatchEncoding.words" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BatchEncoding.words"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L322" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_index<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[Optional[int]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BatchEncoding.words.batch_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BatchEncoding.words.batch_index"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_index</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The index to access in the batch.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BatchEncoding.words.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[Optional[int]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A list indicating the word corresponding to each token. 
Special tokens added by the tokenizer are mapped to <code>None</code> and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word).</p> <!-- HTML_TAG_END --></p></div></div> <p>Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.</p></div></div> <script type="module" data-hydrate="fcanyr"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="fcanyr"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/tokenizer.mdx-187685a5.js") ], params: {} } }); </script>
433
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/keras_callbacks.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;keras-callbacks&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.KerasMetricCallback&quot;,&quot;title&quot;:&quot;KerasMetricCallback&quot;},{&quot;local&quot;:&quot;transformers.PushToHubCallback&quot;,&quot;title&quot;:&quot;PushToHubCallback&quot;}],&quot;title&quot;:&quot;Keras callbacks&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/keras_callbacks.mdx-7ee92fa9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="keras-callbacks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#keras-callbacks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 
0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Keras callbacks </span></h1> <p>When training a Transformers model with Keras, there are some library-specific callbacks available to automate common tasks:</p> <h2 class="relative group"><a id="transformers.KerasMetricCallback" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>KerasMetricCallback </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.KerasMetricCallback"><!-- HTML_TAG_START --><h3 
class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">KerasMetricCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.KerasMetricCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.KerasMetricCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/keras_callbacks.py#L22" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">metric_fn<span class="opacity-60">: typing.Callable</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eval_dataset<span class="opacity-60">: typing.Union[tensorflow.python.data.ops.dataset_ops.DatasetV2, numpy.ndarray, tensorflow.python.framework.ops.Tensor, tuple, dict]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_cols<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_cols<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_size<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">predict_with_generate<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex 
items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.metric_fn" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.metric_fn"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>metric_fn</strong> (<code>Callable</code>) &#x2014; Metric function provided by the user. It will be called with two arguments - <code>predictions</code> and <code>labels</code>. These contain the model&#x2019;s outputs and matching labels from the dataset. 
It should return a dict mapping metric names to numerical values.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.eval_dataset" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.eval_dataset"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eval_dataset</strong> (<code>tf.data.Dataset</code> or <code>dict</code> or <code>tuple</code> or <code>np.ndarray</code> or <code>tf.Tensor</code>) &#x2014; Validation data to be used to generate predictions for the <code>metric_fn</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.output_cols" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.output_cols"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_cols</strong> (`List[str], <em>optional</em>) &#x2014; A list of columns to be retained from the model output as the predictions. Defaults to all.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.label_cols" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.label_cols"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_cols</strong> (&#x2019;<code>List[str]</code>, <em>optional</em>&#x2019;) &#x2014; A list of columns to be retained from the input dataset as the labels. Will be autodetected if this is not supplied.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>) &#x2014; Batch size. 
Only used when the data is not a pre-batched <code>tf.data.Dataset</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.KerasMetricCallback.predict_with_generate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.KerasMetricCallback.predict_with_generate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>predict_with_generate</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether we should use <code>model.generate()</code> to get outputs for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Callback to compute metrics at the end of every epoch. Unlike normal Keras metrics, these do not need to be compilable by TF. It is particularly useful for common NLP metrics like BLEU and ROUGE that require string operations or generation loops that cannot be compiled. Predictions (or generations) will be computed on the <code>eval_dataset</code> before being passed to the <code>metric_fn</code> in <code>np.ndarray</code> format. 
The <code>metric_fn</code> should compute metrics and return a dict mapping metric names to metric values.</p> <p>We provide an example of a suitable metric_fn that computes ROUGE scores for a summarization model below. Note that this example skips some post-processing for readability and simplicity, and should probably not be used as-is!</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_metric rouge_metric = load_metric(<span class="hljs-string">&quot;rouge&quot;</span>) <span class="hljs-keyword">def</span> <span class="hljs-title function_">rouge_fn</span>(<span class="hljs-params">predictions, labels</span>): decoded_predictions = tokenizer.batch_decode(predictions, skip_special_tokens=<span class="hljs-literal">True</span>) decoded_labels 
= tokenizer.batch_decode(labels, skip_special_tokens=<span class="hljs-literal">True</span>) result = rouge_metric.compute(predictions=decoded_predictions, references=decoded_labels) <span class="hljs-keyword">return</span> {key: value.mid.fmeasure * <span class="hljs-number">100</span> <span class="hljs-keyword">for</span> key, value <span class="hljs-keyword">in</span> result.items()}<!-- HTML_TAG_END --></pre></div> <p>The above function will return a dict containing values which will be logged like any other Keras metric:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->{&#x27;rouge1&#x27;: <span class="hljs-number">37.4199</span>, &#x27;rouge2&#x27;: <span class="hljs-number">13.9768</span>, &#x27;rougeL&#x27;: <span class="hljs-number">34.361</span>, &#x27;rougeLsum&#x27;: <span class="hljs-number">35.0781</span><!-- HTML_TAG_END 
--></pre></div></div> <h2 class="relative group"><a id="transformers.PushToHubCallback" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PushToHubCallback </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PushToHubCallback"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 
12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PushToHubCallback</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PushToHubCallback" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PushToHubCallback"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/keras_callbacks.py#L242" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_dir<span class="opacity-60">: typing.Union[str, pathlib.Path]</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_strategy<span class="opacity-60">: typing.Union[str, transformers.trainer_utils.IntervalStrategy] = &#39;epoch&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: typing.Optional[transformers.tokenization_utils_base.PreTrainedTokenizerBase] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_model_id<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hub_token<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">checkpoint<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_args<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.output_dir" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.output_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_dir</strong> (<code>str</code>) &#x2014; The output directory where the model predictions and checkpoints will be written and synced with the repository on the Hub.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.save_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.save_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/trainer_utils#transformers.IntervalStrategy">IntervalStrategy</a>, <em>optional</em>, defaults to <code>&quot;epoch&quot;</code>) &#x2014; The checkpoint save strategy to adopt during training. Possible values are:</p> <ul> <li><code>&quot;no&quot;</code>: Save is done at the end of training.</li> <li><code>&quot;epoch&quot;</code>: Save is done at the end of each epoch.</li> <li><code>&quot;steps&quot;</code>: Save is done every <code>save_steps</code></li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.save_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.save_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of steps between saves when using the &#x201C;steps&#x201D; <code>save_strategy</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.tokenizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.tokenizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokenizer</strong> (<code>PreTrainedTokenizerBase</code>, <em>optional</em>) &#x2014; The tokenizer used by the model. 
If supplied, will be uploaded to the repo alongside the weights.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.hub_model_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.hub_model_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_model_id</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name of the repository to keep in sync with the local <code>output_dir</code>. It can be a simple model ID in which case the model will be pushed in your namespace. 
Otherwise it should be the whole repository name, for instance <code>&quot;user_name/model&quot;</code>, which allows you to push to an organization you are a member of with <code>&quot;organization_name/model&quot;</code>.</p> <p>Will default to to the name of <code>output_dir</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.hub_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.hub_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hub_token</strong> (<code>str</code>, <em>optional</em>) &#x2014; The token to use to push the model to the Hub. 
Will default to the token in the cache folder obtained with <code>huggingface-cli login</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PushToHubCallback.checkpoint" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PushToHubCallback.checkpoint"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>checkpoint</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to save full training checkpoints (including epoch and optimizer state) to allow training to be resumed. Only usable when <code>save_strategy</code> is <code>&quot;epoch&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Callback that will save and push the model to the Hub regularly. By default, it pushes once per epoch, but this can be changed with the <code>save_strategy</code> argument. 
Pushed models can be accessed like any other model on the hub, such as with the <code>from_pretrained</code> method.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.keras_callbacks <span class="hljs-keyword">import</span> PushToHubCallback push_to_hub_callback = PushToHubCallback( output_dir=<span class="hljs-string">&quot;./model_save&quot;</span>, tokenizer=tokenizer, hub_model_id=<span class="hljs-string">&quot;gpt5-7xlarge&quot;</span>, ) model.fit(train_dataset, callbacks=[push_to_hub_callback])<!-- HTML_TAG_END --></pre></div></div> <script type="module" data-hydrate="fbqwvx"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="fbqwvx"]').parentNode, paths: 
{"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/keras_callbacks.mdx-7ee92fa9.js") ], params: {} } }); </script>
434
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/optimizer_schedules.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;optimization&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.AdamW&quot;,&quot;title&quot;:&quot;AdamW (PyTorch)&quot;},{&quot;local&quot;:&quot;transformers.Adafactor&quot;,&quot;title&quot;:&quot;AdaFactor (PyTorch)&quot;},{&quot;local&quot;:&quot;transformers.AdamWeightDecay&quot;,&quot;title&quot;:&quot;AdamWeightDecay (TensorFlow)&quot;},{&quot;local&quot;:&quot;schedules&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.SchedulerType&quot;,&quot;title&quot;:&quot;Learning Rate Schedules (Pytorch)&quot;},{&quot;local&quot;:&quot;transformers.WarmUp&quot;,&quot;title&quot;:&quot;Warmup (TensorFlow)&quot;}],&quot;title&quot;:&quot;Schedules&quot;},{&quot;local&quot;:&quot;gradient-strategies&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.GradientAccumulator&quot;,&quot;title&quot;:&quot;GradientAccumulator (TensorFlow)&quot;}],&quot;title&quot;:&quot;Gradient Strategies&quot;}],&quot;title&quot;:&quot;Optimization&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/optimizer_schedules.mdx-d0982fc0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link 
rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="optimization" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#optimization"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Optimization </span></h1> <p>The <code>.optimization</code> module provides:</p> <ul><li>an optimizer with weight decay fixed that can be used to fine-tuned models, and</li> <li>several schedules in the form of schedule objects that inherit from <code>_LRSchedule</code>:</li> <li>a gradient accumulation class to accumulate the gradients of multiple batches</li></ul> <h2 class="relative group"><a id="transformers.AdamW" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AdamW (PyTorch) </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AdamW"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">AdamW</span></span></h3><!-- 
HTML_TAG_END --> <a id="transformers.AdamW" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AdamW"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L273" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60">: typing.Iterable[torch.nn.parameter.Parameter]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr<span class="opacity-60">: float = 0.001</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">betas<span class="opacity-60">: typing.Tuple[float, float] = (0.9, 0.999)</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eps<span class="opacity-60">: float = 1e-06</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">correct_bias<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">no_deprecation_warning<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Iterable[nn.parameter.Parameter]</code>) &#x2014; Iterable of parameters to optimize or dictionaries defining parameter groups.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.lr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.lr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lr</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-3) &#x2014; The learning rate to use.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.betas" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.betas"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>betas</strong> (<code>Tuple[float,float]</code>, <em>optional</em>, defaults to (0.9, 0.999)) &#x2014; Adam&#x2019;s betas parameters (b1, b2).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.eps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 
0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eps</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-6) &#x2014; Adam&#x2019;s epsilon for numerical stability.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; Decoupled weight decay to apply.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.correct_bias" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.correct_bias"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>correct_bias</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to correct bias in Adam (for instance, in Bert TF repository they use <code>False</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamW.no_deprecation_warning" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.no_deprecation_warning"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>no_deprecation_warning</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; A flag used to disable the deprecation warning (set to <code>True</code> to disable the warning).<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Implements Adam algorithm with weight decay fix as introduced in <a href="https://arxiv.org/abs/1711.05101" rel="nofollow">Decoupled Weight Decay Regularization</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AdamW.step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 
12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>step</span></h4><!-- HTML_TAG_END --> <a id="transformers.AdamW.step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AdamW.step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L323" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">closure<span class="opacity-60">: typing.Callable = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.AdamW.step.closure" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamW.step.closure"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>closure</strong> (<code>Callable</code>, <em>optional</em>) &#x2014; A closure that reevaluates the model and returns the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Performs a single optimization step.</p></div></div> <h2 class="relative group"><a id="transformers.Adafactor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AdaFactor (PyTorch) </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Adafactor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Adafactor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Adafactor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Adafactor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L385" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">params<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eps<span class="opacity-60"> = (1e-30, 0.001)</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clip_threshold<span class="opacity-60"> = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decay_rate<span class="opacity-60"> = -0.8</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beta1<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay<span class="opacity-60"> = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scale_parameter<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">relative_step<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_init<span class="opacity-60"> = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.params" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.params"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>params</strong> (<code>Iterable[nn.parameter.Parameter]</code>) &#x2014; Iterable of parameters to optimize or dictionaries defining parameter groups.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.lr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.lr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lr</strong> (<code>float</code>, <em>optional</em>) &#x2014; The external learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.eps" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.eps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eps</strong> (<code>Tuple[float, float]</code>, <em>optional</em>, defaults to (1e-30, 1e-3)) &#x2014; Regularization constants for square gradient and parameter scale respectively<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.clip_threshold" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.clip_threshold"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clip_threshold</strong> (<code>float</code>, <em>optional</em>, defaults 1.0) &#x2014; Threshold of root mean square of final gradient update<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.decay_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.decay_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to -0.8) &#x2014; Coefficient used to compute running averages of square<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.Adafactor.beta1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.beta1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beta1</strong> (<code>float</code>, <em>optional</em>) &#x2014; Coefficient used for computing running averages of gradient<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; Weight decay (L2 penalty)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.scale_parameter" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.scale_parameter"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scale_parameter</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If True, learning rate is scaled by root mean square<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.Adafactor.relative_step" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.relative_step"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>relative_step</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; If True, time-dependent learning rate is computed instead of external learning rate<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.warmup_init" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.warmup_init"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_init</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Time-dependent learning rate computation depends on whether warm-up initialization is being used<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: <a href="https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py" rel="nofollow">https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py</a></p> <p>Paper: <em>Adafactor: Adaptive Learning Rates with Sublinear Memory Cost</em> <a href="https://arxiv.org/abs/1804.04235" rel="nofollow">https://arxiv.org/abs/1804.04235</a> Note that this optimizer internally adjusts the learning rate depending on the <code>scale_parameter</code>, <code>relative_step</code> and <code>warmup_init</code> options. 
To use a manual (external) learning rate schedule you should set <code>scale_parameter=False</code> and <code>relative_step=False</code>.</p> <p>This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested.</p> <p>Recommended T5 finetuning settings (<a href="https://discuss.huggingface.co/t/t5-finetuning-tips/684/3" rel="nofollow">https://discuss.huggingface.co/t/t5-finetuning-tips/684/3</a>):</p> <ul><li><p>Training without LR warmup or clip_threshold is not recommended.</p> <ul><li>use scheduled LR warm-up to fixed LR</li> <li>use clip_threshold=1.0 (<a href="https://arxiv.org/abs/1804.04235" rel="nofollow">https://arxiv.org/abs/1804.04235</a>)</li></ul></li> <li><p>Disable relative updates</p></li> <li><p>Use scale_parameter=False</p></li> <li><p>Additional optimizer operations like gradient clipping should not be used alongside Adafactor</p></li></ul> <p>Example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">False</span>, relative_step=<span class="hljs-literal">False</span>, warmup_init=<span class="hljs-literal">False</span>, lr=<span class="hljs-number">1e-3</span>)<!-- HTML_TAG_END --></pre></div> <p>Others reported the following combination to work well:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">True</span>, relative_step=<span class="hljs-literal">True</span>, warmup_init=<span class="hljs-literal">True</span>, lr=<span class="hljs-literal">None</span>)<!-- HTML_TAG_END --></pre></div> <p>When using <code>lr=None</code> with <a 
href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> you will most likely need to use <code>AdafactorSchedule</code>scheduler as following:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.optimization <span class="hljs-keyword">import</span> Adafactor, AdafactorSchedule optimizer = Adafactor(model.parameters(), scale_parameter=<span class="hljs-literal">True</span>, relative_step=<span class="hljs-literal">True</span>, warmup_init=<span class="hljs-literal">True</span>, lr=<span class="hljs-literal">None</span>) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))<!-- HTML_TAG_END --></pre></div> <p>Usage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button 
class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># replace AdamW with Adafactor</span> optimizer = Adafactor( model.parameters(), lr=<span class="hljs-number">1e-3</span>, eps=(<span class="hljs-number">1e-30</span>, <span class="hljs-number">1e-3</span>), clip_threshold=<span class="hljs-number">1.0</span>, decay_rate=-<span class="hljs-number">0.8</span>, beta1=<span class="hljs-literal">None</span>, weight_decay=<span class="hljs-number">0.0</span>, relative_step=<span class="hljs-literal">False</span>, scale_parameter=<span class="hljs-literal">False</span>, warmup_init=<span class="hljs-literal">False</span>, )<!-- HTML_TAG_END --></pre></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Adafactor.step"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 
rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>step</span></h4><!-- HTML_TAG_END --> <a id="transformers.Adafactor.step" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Adafactor.step"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L531" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">closure<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Adafactor.step.closure" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Adafactor.step.closure"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>closure</strong> (callable, optional) &#x2014; A closure that reevaluates the model and returns the loss.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Performs a single optimization step</p></div></div> <h2 class="relative group"><a id="transformers.AdamWeightDecay" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AdamWeightDecay (TensorFlow) </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AdamWeightDecay"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">AdamWeightDecay</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.AdamWeightDecay" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AdamWeightDecay"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L152" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">learning_rate<span class="opacity-60">: typing.Union[float, keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule] = 0.001</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beta_1<span class="opacity-60">: float = 0.9</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beta_2<span class="opacity-60">: float = 0.999</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">epsilon<span class="opacity-60">: float = 1e-07</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">amsgrad<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay_rate<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">include_in_weight_decay<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">exclude_from_weight_decay<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60">: str = &#39;AdamWeightDecay&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.learning_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.learning_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>learning_rate</strong> (<code>Union[float, tf.keras.optimizers.schedules.LearningRateSchedule]</code>, <em>optional</em>, defaults to 1e-3) &#x2014; The learning rate to use or a 
schedule.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.beta_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.beta_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beta_1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 parameter in Adam, which is the exponential decay rate for the 1st momentum estimates.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.beta_2" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.beta_2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beta_2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 parameter in Adam, which is the exponential decay rate for the 2nd momentum estimates.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.epsilon" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.epsilon"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>epsilon</strong> 
(<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The epsilon parameter in Adam, which is a small constant for numerical stability.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.amsgrad" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.amsgrad"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>amsgrad</strong> (<code>bool</code>, <em>optional</em>, default to <code>False</code>) &#x2014; Whether to apply AMSGrad variant of this algorithm or not, see <a href="https://arxiv.org/abs/1904.09237" rel="nofollow">On the Convergence of Adam and Beyond</a>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.weight_decay_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.AdamWeightDecay.weight_decay_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to apply.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.include_in_weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.include_in_weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>include_in_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is applied to all parameters by default (unless they are in <code>exclude_from_weight_decay</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.exclude_from_weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.exclude_from_weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>exclude_from_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to exclude from applying weight decay to. 
If a <code>include_in_weight_decay</code> is passed, the names in it will supersede this list.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.AdamWeightDecay.name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.AdamWeightDecay.name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>name</strong> (<code>str</code>, <em>optional</em>, defaults to &#x2018;AdamWeightDecay&#x2019;) &#x2014; Optional name for the operations created when applying gradients. kwargs &#x2014; Keyword arguments. Allowed to be {<code>clipnorm</code>, <code>clipvalue</code>, <code>lr</code>, <code>decay</code>}. <code>clipnorm</code> is clip gradients by norm; <code>clipvalue</code> is clip gradients by value, <code>decay</code> is included for backward compatibility to allow time inverse decay of learning rate. 
<code>lr</code> is included for backward compatibility, recommended to use <code>learning_rate</code> instead.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Adam enables L2 weight decay and clip_by_global_norm on gradients. Just adding the square of the weights to the loss function is <em>not</em> the correct way of using L2 regularization/weight decay with Adam, since that will interact with the m and v parameters in strange ways as shown in <a href="https://arxiv.org/abs/1711.05101" rel="nofollow">Decoupled Weight Decay Regularization</a>.</p> <p>Instead we want ot decay the weights in a manner that doesn’t interact with the m/v parameters. This is equivalent to adding the square of the weights to the loss with plain (non-momentum) SGD.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.AdamWeightDecay.from_config"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 
7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_config</span></h4><!-- HTML_TAG_END --> <a id="transformers.AdamWeightDecay.from_config" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.AdamWeightDecay.from_config"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L209" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Creates an optimizer from its config with WarmUp custom 
object.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.create_optimizer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.create_optimizer</span></h4><!-- HTML_TAG_END --> <a id="transformers.create_optimizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.create_optimizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 
0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L82" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">init_lr<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_train_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_lr_ratio<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_beta1<span class="opacity-60">: float = 0.9</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">adam_beta2<span class="opacity-60">: float = 0.999</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">adam_epsilon<span class="opacity-60">: float = 1e-08</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">weight_decay_rate<span class="opacity-60">: float = 0.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">power<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">include_in_weight_decay<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.init_lr" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.init_lr"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>init_lr</strong> (<code>float</code>) &#x2014; The desired learning rate at the end of the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.num_train_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.num_train_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_train_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.num_warmup_steps" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of warmup steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.min_lr_ratio" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.min_lr_ratio"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 
1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_lr_ratio</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The final learning rate at the end of the linear decay will be <code>init_lr * min_lr_ratio</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.adam_beta1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.adam_beta1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta1</strong> (<code>float</code>, <em>optional</em>, defaults to 0.9) &#x2014; The beta1 to use in Adam.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.adam_beta2" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.adam_beta2"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_beta2</strong> (<code>float</code>, <em>optional</em>, defaults to 0.999) &#x2014; The beta2 to use in Adam.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.adam_epsilon" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.adam_epsilon"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>adam_epsilon</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-8) &#x2014; The epsilon to use in Adam.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.weight_decay_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.weight_decay_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>weight_decay_rate</strong> (<code>float</code>, <em>optional</em>, defaults to 0) &#x2014; The weight decay to use.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.power" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.power"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; The power to use for PolynomialDecay.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.create_optimizer.include_in_weight_decay" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.create_optimizer.include_in_weight_decay"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>include_in_weight_decay</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014; List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is applied to all parameters except bias and layer norm parameters.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay.</p></div> <h2 class="relative group"><a id="schedules" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#schedules"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Schedules </span></h2> <h3 class="relative group"><a id="transformers.SchedulerType" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SchedulerType"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Learning Rate Schedules (Pytorch) </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SchedulerType"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path 
class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SchedulerType</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SchedulerType" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SchedulerType"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_utils.py#L301" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">names<span class="opacity-60"> = None</span></span> </span><span 
class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qualname<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>An enumeration.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_scheduler"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 
11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_scheduler</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_scheduler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_scheduler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L233" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60">: typing.Union[str, transformers.trainer_utils.SchedulerType]</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60">: Optimizer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_scheduler.name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_scheduler.name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>name</strong> (<code>str</code> or <code>SchedulerType</code>) &#x2014; The name of the scheduler to use.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_scheduler.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_scheduler.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>torch.optim.Optimizer</code>) &#x2014; The optimizer that will be used during training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_scheduler.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_scheduler.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>, <em>optional</em>) &#x2014; The number of warmup steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it&#x2019;s unset and the scheduler type requires it.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_scheduler.num_training_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_scheduler.num_training_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_training_steps</strong> (`int&#x201C;, <em>optional</em>) &#x2014; The number of training steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it&#x2019;s unset and the scheduler type requires it.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Unified API to get any scheduler from its name.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_constant_schedule"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_constant_schedule</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_constant_schedule" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_constant_schedule"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L34" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60">: Optimizer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_epoch<span class="opacity-60">: int = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 
!mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_constant_schedule.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_constant_schedule.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_constant_schedule.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_constant_schedule.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a constant learning rate, using the learning rate set in optimizer.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_constant_schedule_with_warmup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 
11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_constant_schedule_with_warmup</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_constant_schedule_with_warmup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_constant_schedule_with_warmup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60">: Optimizer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_epoch<span class="opacity-60">: int = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_constant_schedule_with_warmup.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_constant_schedule_with_warmup.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> 
(<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_constant_schedule_with_warmup.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_constant_schedule_with_warmup.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_constant_schedule_with_warmup.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_constant_schedule_with_warmup.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer.</p></div> <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_constant_schedule.png"> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_cosine_schedule_with_warmup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 
18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_cosine_schedule_with_warmup</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_cosine_schedule_with_warmup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_cosine_schedule_with_warmup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L104" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60">: Optimizer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_cycles<span class="opacity-60">: float = 0.5</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_epoch<span class="opacity-60">: int = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_schedule_with_warmup.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_schedule_with_warmup.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_schedule_with_warmup.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_schedule_with_warmup.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; 
The number of steps for the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_schedule_with_warmup.num_training_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_schedule_with_warmup.num_training_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_schedule_with_warmup.num_cycles" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_schedule_with_warmup.num_cycles"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_cycles</strong> (<code>float</code>, <em>optional</em>, defaults to 0.5) &#x2014; The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_schedule_with_warmup.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_schedule_with_warmup.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.</p></div> <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_schedule.png"> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 
17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_cosine_with_hard_restarts_schedule_with_warmup</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L138" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60">: Optimizer</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">num_warmup_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_cycles<span class="opacity-60">: int = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_epoch<span class="opacity-60">: int = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 
56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_training_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_training_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_cycles" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup.num_cycles"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_cycles</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of hard restarts to use.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_cosine_with_hard_restarts_schedule_with_warmup.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_cosine_with_hard_restarts_schedule_with_warmup.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a learning rate that decreases following the values 
of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer.</p></div> <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_hard_restarts_schedule.png"> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_linear_schedule_with_warmup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_linear_schedule_with_warmup</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.get_linear_schedule_with_warmup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_linear_schedule_with_warmup"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L75" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">last_epoch<span class="opacity-60"> = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_linear_schedule_with_warmup.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_linear_schedule_with_warmup.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_linear_schedule_with_warmup.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.get_linear_schedule_with_warmup.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_linear_schedule_with_warmup.num_training_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_linear_schedule_with_warmup.num_training_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_linear_schedule_with_warmup.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_linear_schedule_with_warmup.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the 
optimizer.</p></div> <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_linear_schedule.png"> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.get_polynomial_decay_schedule_with_warmup"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.get_polynomial_decay_schedule_with_warmup</span></h4><!-- HTML_TAG_END --> <a id="transformers.get_polynomial_decay_schedule_with_warmup" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.get_polynomial_decay_schedule_with_warmup"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization.py#L173" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_warmup_steps<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_training_steps<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr_end<span class="opacity-60"> = 1e-07</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">power<span class="opacity-60"> = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_epoch<span class="opacity-60"> = -1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_polynomial_decay_schedule_with_warmup.optimizer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.optimizer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>optimizer</strong> (<code>Optimizer</code>) &#x2014; The optimizer for which to schedule the learning rate.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.get_polynomial_decay_schedule_with_warmup.num_warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.num_warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup phase.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_polynomial_decay_schedule_with_warmup.num_training_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.num_training_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_training_steps</strong> (<code>int</code>) &#x2014; The total number of training steps.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_polynomial_decay_schedule_with_warmup.lr_end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.lr_end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>lr_end</strong> (<code>float</code>, <em>optional</em>, defaults to 1e-7) &#x2014; The end LR.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_polynomial_decay_schedule_with_warmup.power" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.power"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Power factor.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.get_polynomial_decay_schedule_with_warmup.last_epoch" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.get_polynomial_decay_schedule_with_warmup.last_epoch"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 
1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_epoch</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The index of the last epoch when resuming training.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by <em>lr_end</em>, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.</p> <p>Note: <em>power</em> defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at <a href="https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37" rel="nofollow">https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37</a></p></div> <h3 class="relative group"><a id="transformers.WarmUp" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.WarmUp"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Warmup (TensorFlow) </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.WarmUp"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">WarmUp</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.WarmUp" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.WarmUp"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L24" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initial_learning_rate<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decay_schedule_fn<span class="opacity-60">: typing.Callable</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">warmup_steps<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">power<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60">: str = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.WarmUp.initial_learning_rate" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.WarmUp.initial_learning_rate"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initial_learning_rate</strong> (<code>float</code>) &#x2014; The initial learning rate for the schedule after the warmup (so this will be the learning rate at the end of the warmup).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.WarmUp.decay_schedule_fn" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.WarmUp.decay_schedule_fn"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decay_schedule_fn</strong> (<code>Callable</code>) &#x2014; The schedule function to apply after the warmup for the rest of training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.WarmUp.warmup_steps" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.WarmUp.warmup_steps"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>warmup_steps</strong> (<code>int</code>) &#x2014; The number of steps for the warmup part of training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.WarmUp.power" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.WarmUp.power"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>power</strong> (<code>float</code>, <em>optional</em>, defaults to 1) &#x2014; The power to use for the polynomial warmup (defaults is a linear warmup).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.WarmUp.name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.WarmUp.name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Optional name prefix for the returned tensors during the schedule.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Applies a warmup schedule on a given learning rate decay schedule.</p></div> <h2 class="relative group"><a id="gradient-strategies" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#gradient-strategies"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Gradient Strategies </span></h2> <h3 class="relative group"><a id="transformers.GradientAccumulator" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.GradientAccumulator"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>GradientAccumulator (TensorFlow) </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.GradientAccumulator"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path 
class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">GradientAccumulator</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.GradientAccumulator" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.GradientAccumulator"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L282" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> 
<span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a replica context. Gradients will be accumulated locally on each replica and without synchronization. Users should then call <code>.gradients</code>, scale the gradients if required, and pass the result to <code>apply_gradients</code>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.GradientAccumulator.reset"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>reset</span></h4><!-- HTML_TAG_END 
--> <a id="transformers.GradientAccumulator.reset" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.GradientAccumulator.reset"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/optimization_tf.py#L344" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Resets the accumulated gradients on the current replica.</p></div></div> <script type="module" data-hydrate="ba616x"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="ba616x"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), 
import("/docs/transformers/pr_16143/en/_app/pages/main_classes/optimizer_schedules.mdx-d0982fc0.js") ], params: {} } }); </script>
435
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/deepspeed.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;deepspeed-integration&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;trainer-deepspeed-integration&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;installation&quot;,&quot;title&quot;:&quot;Installation&quot;},{&quot;local&quot;:&quot;deployment-with-multiple-gpus&quot;,&quot;title&quot;:&quot;Deployment with multiple GPUs&quot;},{&quot;local&quot;:&quot;deployment-with-one-gpu&quot;,&quot;title&quot;:&quot;Deployment with one GPU&quot;},{&quot;local&quot;:&quot;deployment-in-notebooks&quot;,&quot;title&quot;:&quot;Deployment in Notebooks&quot;},{&quot;local&quot;:&quot;configuration&quot;,&quot;title&quot;:&quot;Configuration&quot;},{&quot;local&quot;:&quot;passing-configuration&quot;,&quot;title&quot;:&quot;Passing Configuration&quot;},{&quot;local&quot;:&quot;shared-configuration&quot;,&quot;title&quot;:&quot;Shared Configuration&quot;},{&quot;local&quot;:&quot;zero&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;zero2-config&quot;,&quot;title&quot;:&quot;ZeRO-2 Config&quot;},{&quot;local&quot;:&quot;zero3-config&quot;,&quot;title&quot;:&quot;ZeRO-3 Config&quot;}],&quot;title&quot;:&quot;ZeRO&quot;},{&quot;local&quot;:&quot;nvme-support&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;zero2-vs-zero3-performance&quot;,&quot;title&quot;:&quot;ZeRO-2 vs ZeRO-3 Performance&quot;},{&quot;local&quot;:&quot;zero2-example&quot;,&quot;title&quot;:&quot;ZeRO-2 Example&quot;},{&quot;local&quot;:&quot;zero3-example&quot;,&quot;title&quot;:&quot;ZeRO-3 Example&quot;}],&quot;title&quot;:&quot;NVMe Support&quot;},{&quot;local&quot;:&quot;optimizer-and-scheduler&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;optimizer&quot;,&quot;title&quot;:&quot;Optimizer&quot;},{&quot;local&quot;:&quot;scheduler&quot;,&quot;title&quot;:&quot;Scheduler&quot;}],&quot;title&quot;:&quot;Optimizer and 
Scheduler&quot;},{&quot;local&quot;:&quot;fp32-precision&quot;,&quot;title&quot;:&quot;fp32 Precision&quot;},{&quot;local&quot;:&quot;automatic-mixed-precision&quot;,&quot;title&quot;:&quot;Automatic Mixed Precision&quot;},{&quot;local&quot;:&quot;fp16&quot;,&quot;title&quot;:&quot;fp16&quot;},{&quot;local&quot;:&quot;bf16&quot;,&quot;title&quot;:&quot;bf16&quot;},{&quot;local&quot;:&quot;apex&quot;,&quot;title&quot;:&quot;apex&quot;},{&quot;local&quot;:&quot;batch-size&quot;,&quot;title&quot;:&quot;Batch Size&quot;},{&quot;local&quot;:&quot;gradient-accumulation&quot;,&quot;title&quot;:&quot;Gradient Accumulation&quot;},{&quot;local&quot;:&quot;gradient-clipping&quot;,&quot;title&quot;:&quot;Gradient Clipping&quot;},{&quot;local&quot;:&quot;getting-the-model-weights-out&quot;,&quot;title&quot;:&quot;Getting The Model Weights Out&quot;},{&quot;local&quot;:&quot;zero3-and-infinity-nuances&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;constructing-massive-models&quot;,&quot;title&quot;:&quot;Constructing Massive Models&quot;},{&quot;local&quot;:&quot;gathering-parameters&quot;,&quot;title&quot;:&quot;Gathering Parameters&quot;}],&quot;title&quot;:&quot;ZeRO-3 and Infinity Nuances&quot;},{&quot;local&quot;:&quot;zero-inference&quot;,&quot;title&quot;:&quot;ZeRO Inference&quot;},{&quot;local&quot;:&quot;memory-requirements&quot;,&quot;title&quot;:&quot;Memory Requirements&quot;},{&quot;local&quot;:&quot;filing-issues&quot;,&quot;title&quot;:&quot;Filing Issues&quot;},{&quot;local&quot;:&quot;troubleshooting&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;the-deepspeed-process-gets-killed-at-startup-without-a-traceback&quot;,&quot;title&quot;:&quot;the `deepspeed` process gets killed at startup without a traceback&quot;},{&quot;local&quot;:&quot;training-andor-evalpredict-loss-is-nan&quot;,&quot;title&quot;:&quot;training and/or eval/predict loss is 
`NaN`&quot;}],&quot;title&quot;:&quot;Troubleshooting&quot;},{&quot;local&quot;:&quot;notes&quot;,&quot;title&quot;:&quot;Notes&quot;}],&quot;title&quot;:&quot;Trainer Deepspeed Integration&quot;},{&quot;local&quot;:&quot;nontrainer-deepspeed-integration&quot;,&quot;title&quot;:&quot;Non-Trainer Deepspeed Integration&quot;},{&quot;local&quot;:&quot;transformers.deepspeed.HfDeepSpeedConfig&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;custom-deepspeed-zero-inference&quot;,&quot;title&quot;:&quot;Custom DeepSpeed ZeRO Inference&quot;}],&quot;title&quot;:&quot;HfDeepSpeedConfig&quot;},{&quot;local&quot;:&quot;main-deepspeed-resources&quot;,&quot;title&quot;:&quot;Main DeepSpeed Resources&quot;}],&quot;title&quot;:&quot;DeepSpeed Integration&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/deepspeed.mdx-f19f464c.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="deepspeed-integration" class="header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#deepspeed-integration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DeepSpeed Integration </span></h1> <p><a href="https://github.com/microsoft/DeepSpeed" rel="nofollow">DeepSpeed</a> implements everything described in the <a href="https://arxiv.org/abs/1910.02054" rel="nofollow">ZeRO paper</a>. Currently it provides full support for:</p> <ol><li>Optimizer state partitioning (ZeRO stage 1)</li> <li>Gradient partitioning (ZeRO stage 2)</li> <li>Parameter partitioning (ZeRO stage 3)</li> <li>Custom mixed precision training handling</li> <li>A range of fast CUDA-extension-based optimizers</li> <li>ZeRO-Offload to CPU and NVMe</li></ol> <p>ZeRO-Offload has its own dedicated paper: <a href="https://arxiv.org/abs/2101.06840" rel="nofollow">ZeRO-Offload: Democratizing Billion-Scale Model Training</a>. 
And NVMe-support is described in the paper <a href="https://arxiv.org/abs/2104.07857" rel="nofollow">ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning</a>.</p> <p>DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference.</p> <p>DeepSpeed ZeRO-3 can be used for inference as well, since it allows huge models to be loaded on multiple GPUs, which won’t be possible on a single GPU.</p> <p>🤗 Transformers integrates <a href="https://github.com/microsoft/DeepSpeed" rel="nofollow">DeepSpeed</a> via 2 options:</p> <ol><li>Integration of the core DeepSpeed features via <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>. This is an everything-done-for-you type of integration - just supply your custom config file or use our template and you have nothing else to do. Most of this document is focused on this feature.</li> <li>If you don’t use <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> and want to use your own Trainer where you integrated DeepSpeed yourself, core functionality functions like <code>from_pretrained</code> and <code>from_config</code> include integration of essential parts of DeepSpeed like <code>zero.Init</code> for ZeRO stage 3 and higher. To tap into this feature read the docs on <a href="#deepspeed-non-trainer-integration">deepspeed-non-trainer-integration</a>.</li></ol> <p>What is integrated:</p> <p>Training:</p> <ol><li>DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).</li></ol> <p>Inference:</p> <ol><li>DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but it doesn’t use an optimizer and a lr scheduler and only stage 3 is relevant. 
For more details see: <a href="#deepspeed-zero-inference">deepspeed-zero-inference</a>.</li></ol> <p>There is also DeepSpeed Inference - this is a totally different technology which uses Tensor Parallelism instead of ZeRO (coming soon).</p> <a id="deepspeed-trainer-integration"></a> <h2 class="relative group"><a id="trainer-deepspeed-integration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trainer-deepspeed-integration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Trainer Deepspeed Integration </span></h2> <a id="deepspeed-installation"></a> <h3 class="relative group"><a id="installation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#installation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Installation </span></h3> <p>Install the library via pypi:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install deepspeed<!-- HTML_TAG_END --></pre></div> <p>or via <code>transformers</code>’ <code>extras</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none 
transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[deepspeed]<!-- HTML_TAG_END --></pre></div> <p>or find more details on <a href="https://github.com/microsoft/deepspeed#installation" rel="nofollow">the DeepSpeed’s GitHub page</a> and <a href="https://www.deepspeed.ai/tutorials/advanced-install/" rel="nofollow">advanced install</a>.</p> <p>If you’re still struggling with the build, first make sure to read <a href="#zero-install-notes">zero-install-notes</a>.</p> <p>If you don’t prebuild the extensions and rely on them to be built at run time and you tried all of the above solutions to no avail, the next thing to try is to pre-build the modules before installing them.</p> <p>To make a local build for DeepSpeed:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" 
type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->git <span class="hljs-built_in">clone</span> https://github.com/microsoft/DeepSpeed/ <span class="hljs-built_in">cd</span> DeepSpeed <span class="hljs-built_in">rm</span> -rf build TORCH_CUDA_ARCH_LIST=<span class="hljs-string">&quot;8.6&quot;</span> DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 pip install . \ --global-option=<span class="hljs-string">&quot;build_ext&quot;</span> --global-option=<span class="hljs-string">&quot;-j8&quot;</span> --no-cache -v \ --disable-pip-version-check 2&gt;&amp;1 | <span class="hljs-built_in">tee</span> build.log<!-- HTML_TAG_END --></pre></div> <p>If you intend to use NVMe offload you will also need to include <code>DS_BUILD_AIO=1</code> in the instructions above (and also install <em>libaio-dev</em> system-wide).</p> <p>Edit <code>TORCH_CUDA_ARCH_LIST</code> to insert the code for the architectures of the GPU cards you intend to use. 
Assuming all your cards are the same you can get the arch via:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES=0 python -c <span class="hljs-string">&quot;import torch; print(torch.cuda.get_device_capability())&quot;</span><!-- HTML_TAG_END --></pre></div> <p>So if you get <code>8, 6</code>, then use <code>TORCH_CUDA_ARCH_LIST=&quot;8.6&quot;</code>. 
If you have multiple different cards, you can list all of them like so <code>TORCH_CUDA_ARCH_LIST=&quot;6.1;8.6&quot;</code></p> <p>If you need to use the same setup on multiple machines, make a binary wheel:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->git <span class="hljs-built_in">clone</span> https://github.com/microsoft/DeepSpeed/ <span class="hljs-built_in">cd</span> DeepSpeed <span class="hljs-built_in">rm</span> -rf build TORCH_CUDA_ARCH_LIST=<span class="hljs-string">&quot;8.6&quot;</span> DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 \ python setup.py build_ext -j8 bdist_wheel<!-- HTML_TAG_END --></pre></div> <p>it will generate something like <code>dist/deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl</code> which now you can install as <code>pip install deepspeed-0.3.13+8cd046f-cp38-cp38-linux_x86_64.whl</code> locally or on 
any other machine.</p> <p>Again, remember to ensure to adjust <code>TORCH_CUDA_ARCH_LIST</code> to the target architectures.</p> <p>You can find the complete list of NVIDIA GPUs and their corresponding <strong>Compute Capabilities</strong> (same as arch in this context) <a href="https://developer.nvidia.com/cuda-gpus" rel="nofollow">here</a>.</p> <p>You can check the archs pytorch was built with using:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -c <span class="hljs-string">&quot;import torch; print(torch.cuda.get_arch_list())&quot;</span><!-- HTML_TAG_END --></pre></div> <p>Here is how to find out the arch for one of the installed GPUs. 
For example, for GPU 0:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->CUDA_VISIBLE_DEVICES=0 python -c <span class="hljs-string">&quot;import torch; \ print(torch.cuda.get_device_properties(torch.device(&#x27;cuda&#x27;)))&quot;</span><!-- HTML_TAG_END --></pre></div> <p>If the output is:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->_CudaDeviceProperties(name=<span class="hljs-string">&#x27;GeForce RTX 3090&#x27;</span>, major=8, minor=6, total_memory=24268MB, multi_processor_count=82)<!-- HTML_TAG_END --></pre></div> <p>then you know that this card’s arch is <code>8.6</code>.</p> <p>You can also leave <code>TORCH_CUDA_ARCH_LIST</code> out completely and then the build program will automatically query the architecture of the GPUs the build is made on. 
This may or may not match the GPUs on the target machines, that’s why it’s best to specify the desired archs explicitly.</p> <p>If after trying everything suggested you still encounter build issues, please, proceed with the GitHub Issue of <a href="https://github.com/microsoft/DeepSpeed/issues" rel="nofollow">Deepspeed</a>,</p> <a id="deepspeed-multi-gpu"></a> <h3 class="relative group"><a id="deployment-with-multiple-gpus" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#deployment-with-multiple-gpus"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Deployment with multiple GPUs </span></h3> <p>To deploy this feature with multiple GPUs adjust the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments as following:</p> <ol><li>replace <code>python -m torch.distributed.launch</code> with <code>deepspeed</code>.</li> <li>add a new argument <code>--deepspeed ds_config.json</code>, where <code>ds_config.json</code> is the DeepSpeed configuration file as documented <a href="https://www.deepspeed.ai/docs/config-json/" rel="nofollow">here</a>. 
The file naming is up to you.</li></ol> <p>Therefore, if your original command line looked as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m torch.distributed.launch --nproc_per_node=2 your_program.py &lt;normal cl args&gt;<!-- HTML_TAG_END --></pre></div> <p>Now it should be:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed --num_gpus=2 your_program.py &lt;normal cl args&gt; --deepspeed ds_config.json<!-- HTML_TAG_END --></pre></div> <p>Unlike, <code>torch.distributed.launch</code> where you have to specify how many GPUs to use with <code>--nproc_per_node</code>, with the <code>deepspeed</code> launcher you don’t have to use the corresponding <code>--num_gpus</code> if you want all of your GPUs used. The full details on how to configure various nodes and GPUs can be found <a href="https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node" rel="nofollow">here</a>.</p> <p>In fact, you can continue using <code>-m torch.distributed.launch</code> with DeepSpeed as long as you don’t need to use <code>deepspeed</code> launcher-specific arguments. Typically if you don’t need a multi-node setup you’re not required to use the <code>deepspeed</code> launcher. 
But since in the DeepSpeed documentation it’ll be used everywhere, for consistency we will use it here as well.</p> <p>Here is an example of running <code>run_translation.py</code> under DeepSpeed deploying all available GPUs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \ --source_lang en --target_lang ro<!-- HTML_TAG_END --></pre></div> <p>Note that in the DeepSpeed documentation you are likely to see <code>--deepspeed --deepspeed_config ds_config.json</code> 
- i.e. two DeepSpeed-related arguments, but for the sake of simplicity, and since there are already so many arguments to deal with, we combined the two into a single argument.</p> <p>For some practical usage examples, please, see this <a href="https://github.com/huggingface/transformers/issues/8771#issuecomment-759248400" rel="nofollow">post</a>.</p> <a id="deepspeed-one-gpu"></a> <h3 class="relative group"><a id="deployment-with-one-gpu" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#deployment-with-one-gpu"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Deployment with one GPU </span></h3> <p>To deploy DeepSpeed with one GPU adjust the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" 
fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero2.json \ --model_name_or_path t5-small --per_device_train_batch_size 1 \ --output_dir output_dir --overwrite_output_dir --fp16 \ --do_train --max_train_samples 500 --num_train_epochs 1 \ --dataset_name wmt16 --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> \ --source_lang en --target_lang ro<!-- HTML_TAG_END --></pre></div> <p>This is almost the same as with multiple-GPUs, but here we tell DeepSpeed explicitly to use just one GPU via <code>--num_gpus=1</code>. By default, DeepSpeed deploys all GPUs it can see on the given node. If you have only 1 GPU to start with, then you don’t need this argument. The following <a href="https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node" rel="nofollow">documentation</a> discusses the launcher options.</p> <p>Why would you want to use DeepSpeed with just one GPU?</p> <ol><li>It has a ZeRO-offload feature which can delegate some computations and memory to the host’s CPU and RAM, and thus leave more GPU resources for model’s needs - e.g. 
larger batch size, or enabling a fitting of a very big model which normally won’t fit.</li> <li>It provides a smart GPU memory management system, that minimizes memory fragmentation, which again allows you to fit bigger models and data batches.</li></ol> <p>While we are going to discuss the configuration in details next, the key to getting a huge improvement on a single GPU with DeepSpeed is to have at least the following configuration in the configuration file:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span 
class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>which enables optimizer offload and some other important features. 
You may experiment with the buffer sizes, you will find more details in the discussion below.</p> <p>For a practical usage example of this type of deployment, please, see this <a href="https://github.com/huggingface/transformers/issues/8771#issuecomment-759176685" rel="nofollow">post</a>.</p> <p>You may also try the ZeRO-3 with CPU and NVMe offload as explained further in this document.</p> <p>Notes:</p> <ul><li><p>if you need to run on a specific GPU, which is different from GPU 0, you can’t use <code>CUDA_VISIBLE_DEVICES</code> to limit the visible scope of available GPUs. Instead, you have to use the following syntax:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed --include localhost:1 examples/pytorch/translation/run_translation.py ...<!-- HTML_TAG_END --></pre></div> <p>In this example, we tell DeepSpeed to use GPU 1 
(second gpu).</p></li></ul> <a id="deepspeed-notebook"></a> <h3 class="relative group"><a id="deployment-in-notebooks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#deployment-in-notebooks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Deployment in Notebooks </span></h3> <p>The problem with running notebook cells as a script is that there is no normal <code>deepspeed</code> launcher to rely on, so under certain setups we have to emulate it.</p> <p>If you’re using only 1 GPU, here is how you’d have to adjust your training code in the notebook to use DeepSpeed.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># DeepSpeed requires a distributed environment even when only one process is used.</span> <span class="hljs-comment"># This emulates a launcher in the notebook</span> <span class="hljs-keyword">import</span> os os.environ[<span class="hljs-string">&quot;MASTER_ADDR&quot;</span>] = <span class="hljs-string">&quot;localhost&quot;</span> os.environ[<span class="hljs-string">&quot;MASTER_PORT&quot;</span>] = <span class="hljs-string">&quot;9994&quot;</span> <span class="hljs-comment"># modify if RuntimeError: Address already in use</span> os.environ[<span class="hljs-string">&quot;RANK&quot;</span>] = <span class="hljs-string">&quot;0&quot;</span> os.environ[<span class="hljs-string">&quot;LOCAL_RANK&quot;</span>] = <span class="hljs-string">&quot;0&quot;</span> os.environ[<span class="hljs-string">&quot;WORLD_SIZE&quot;</span>] = <span class="hljs-string">&quot;1&quot;</span> <span class="hljs-comment"># Now proceed as normal, plus pass the deepspeed config file</span> training_args = TrainingArguments(..., deepspeed=<span class="hljs-string">&quot;ds_config_zero3.json&quot;</span>) trainer = Trainer(...) 
trainer.train()<!-- HTML_TAG_END --></pre></div> <p>Note: <code>...</code> stands for the normal arguments that you’d pass to the functions.</p> <p>If you want to use more than 1 GPU, you must use a multi-process environment for DeepSpeed to work. That is, you have to use the launcher for that purpose and this cannot be accomplished by emulating the distributed environment presented at the beginning of this section.</p> <p>If you want to create the config file on the fly in the notebook in the current directory, you could have a dedicated cell with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->%%bash cat &lt;&lt;<span class="hljs-string">&#x27;EOT&#x27;</span> &gt; ds_config_zero3.json { <span class="hljs-string">&quot;fp16&quot;</span>: { <span class="hljs-string">&quot;enabled&quot;</span>: <span 
class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;loss_scale&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;loss_scale_window&quot;</span>: <span class="hljs-number">1000</span>, <span class="hljs-string">&quot;initial_scale_power&quot;</span>: <span class="hljs-number">16</span>, <span class="hljs-string">&quot;hysteresis&quot;</span>: <span class="hljs-number">2</span>, <span class="hljs-string">&quot;min_loss_scale&quot;</span>: <span class="hljs-number">1</span> }, <span class="hljs-string">&quot;optimizer&quot;</span>: { <span class="hljs-string">&quot;type&quot;</span>: <span class="hljs-string">&quot;AdamW&quot;</span>, <span class="hljs-string">&quot;params&quot;</span>: { <span class="hljs-string">&quot;lr&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;betas&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;eps&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;weight_decay&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span> } }, <span class="hljs-string">&quot;scheduler&quot;</span>: { <span class="hljs-string">&quot;type&quot;</span>: <span class="hljs-string">&quot;WarmupLR&quot;</span>, <span class="hljs-string">&quot;params&quot;</span>: { <span class="hljs-string">&quot;warmup_min_lr&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;warmup_max_lr&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;warmup_num_steps&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span> } }, <span class="hljs-string">&quot;zero_optimization&quot;</span>: { <span class="hljs-string">&quot;stage&quot;</span>: <span class="hljs-number">3</span>, <span class="hljs-string">&quot;offload_optimizer&quot;</span>: { <span 
class="hljs-string">&quot;device&quot;</span>: <span class="hljs-string">&quot;cpu&quot;</span>, <span class="hljs-string">&quot;pin_memory&quot;</span>: true }, <span class="hljs-string">&quot;offload_param&quot;</span>: { <span class="hljs-string">&quot;device&quot;</span>: <span class="hljs-string">&quot;cpu&quot;</span>, <span class="hljs-string">&quot;pin_memory&quot;</span>: true }, <span class="hljs-string">&quot;overlap_comm&quot;</span>: true, <span class="hljs-string">&quot;contiguous_gradients&quot;</span>: true, <span class="hljs-string">&quot;sub_group_size&quot;</span>: <span class="hljs-number">1e9</span>, <span class="hljs-string">&quot;reduce_bucket_size&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;stage3_prefetch_bucket_size&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;stage3_param_persistence_threshold&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;stage3_max_live_parameters&quot;</span>: <span class="hljs-number">1e9</span>, <span class="hljs-string">&quot;stage3_max_reuse_distance&quot;</span>: <span class="hljs-number">1e9</span>, <span class="hljs-string">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span>: true }, <span class="hljs-string">&quot;gradient_accumulation_steps&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;gradient_clipping&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;steps_per_print&quot;</span>: <span class="hljs-number">2000</span>, <span class="hljs-string">&quot;train_batch_size&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;train_micro_batch_size_per_gpu&quot;</span>: <span class="hljs-string">&quot;auto&quot;</span>, <span class="hljs-string">&quot;wall_clock_breakdown&quot;</span>: false } EOT<!-- HTML_TAG_END 
--></pre></div> <p>If the training script is in a normal file and not in the notebook cells, you can launch <code>deepspeed</code> normally via shell from a cell. For example, to use <code>run_translation.py</code> you would launch it with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->!git clone https://github.com/huggingface/transformers !cd transformers; deepspeed examples/pytorch/translation/run_translation.py ...<!-- HTML_TAG_END --></pre></div> <p>or with <code>%%bash</code> magic, where you can write a multi-line code for the shell program to run:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" 
type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->%%bash git clone https://github.com/huggingface/transformers cd transformers deepspeed examples/pytorch/translation/run_translation.py ...<!-- HTML_TAG_END --></pre></div> <p>In such case you don’t need any of the code presented at the beginning of this section.</p> <p>Note: While <code>%%bash</code> magic is neat, but currently it buffers the output so you won’t see the logs until the process completes.</p> <a id="deepspeed-config"></a> <h3 class="relative group"><a id="configuration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#configuration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Configuration </span></h3> <p>For the complete guide to the DeepSpeed configuration options that can be used in its configuration file please refer to the <a href="https://www.deepspeed.ai/docs/config-json/" rel="nofollow">following documentation</a>.</p> <p>You can find dozens of DeepSpeed configuration examples that address various practical needs in <a href="https://github.com/microsoft/DeepSpeedExamples" rel="nofollow">the DeepSpeedExamples repo</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> 
Copied</div></button></div> <pre><!-- HTML_TAG_START -->git <span class="hljs-built_in">clone</span> https://github.com/microsoft/DeepSpeedExamples <span class="hljs-built_in">cd</span> DeepSpeedExamples find . -name <span class="hljs-string">&#x27;*json&#x27;</span><!-- HTML_TAG_END --></pre></div> <p>Continuing the code from above, let’s say you’re looking to configure the Lamb optimizer. So you can search through the example <code>.json</code> files with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->grep -i Lamb $(find . 
-name <span class="hljs-string">&#x27;*json&#x27;</span>)<!-- HTML_TAG_END --></pre></div> <p>Some more examples are to be found in the <a href="https://github.com/microsoft/DeepSpeed" rel="nofollow">main repo</a> as well.</p> <p>When using DeepSpeed you always need to supply a DeepSpeed configuration file, yet some configuration parameters have to be configured via the command line. You will find the nuances in the rest of this guide.</p> <p>To get an idea of what DeepSpeed configuration file looks like, here is one that activates ZeRO stage 2 features, including optimizer states cpu offload, uses <code>AdamW</code> optimizer and <code>WarmupLR</code> scheduler and will enable mixed precision training if <code>--fp16</code> is passed:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span 
class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span 
class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span 
class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>When you execute the program, DeepSpeed will log the configuration it received from the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> to the console, so you can see exactly what was the final configuration passed to it.</p> <a id="deepspeed-config-passing"></a> <h3 class="relative group"><a id="passing-configuration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#passing-configuration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Passing Configuration </span></h3> <p>As discussed in this document normally the DeepSpeed configuration is passed as a path to a json file, but if you’re not using the command line interface to configure the training, and instead instantiate the <a 
href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> via <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> then for the <code>deepspeed</code> argument you can pass a nested <code>dict</code>. This allows you to create the configuration on the fly and doesn’t require you to write it to the file system before passing it to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</p> <p>To summarize you can do:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->TrainingArguments(..., deepspeed=<span class="hljs-string">&quot;/path/to/ds_config.json&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>or:</p> <div class="code-block relative"><div class="absolute top-2.5 
right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->ds_config_dict = <span class="hljs-built_in">dict</span>(scheduler=scheduler_params, optimizer=optimizer_params) TrainingArguments(..., deepspeed=ds_config_dict)<!-- HTML_TAG_END --></pre></div> <a id="deepspeed-config-shared"></a> <h3 class="relative group"><a id="shared-configuration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#shared-configuration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 
40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Shared Configuration </span></h3> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This section is a must-read</p></div> <p>Some configuration values are required by both the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> and DeepSpeed to function correctly, therefore, to prevent conflicting definitions, which could lead to hard to detect errors, we chose to configure those via the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments.</p> <p>Additionally, some configuration values are derived automatically based on the model’s configuration, so instead of remembering to manually adjust multiple values, it’s best to let the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> do the majority of configuration for you.</p> <p>Therefore, in the rest of this guide you will find a special configuration value: <code>auto</code>, which when set will be automatically replaced with the correct or most efficient value. Please feel free to choose to ignore this recommendation and set the values explicitly, in which case be very careful that your <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> arguments and DeepSpeed configurations agree. 
For example, are you using the same learning rate, or batch size, or gradient accumulation settings? if these mismatch the training may fail in very difficult to detect ways. You have been warned.</p> <p>There are multiple other values that are specific to DeepSpeed-only and those you will have to set manually to suit your needs.</p> <p>In your own programs, you can also use the following approach if you’d like to modify the DeepSpeed config as a master and configure <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> based on that. The steps are:</p> <ol><li>Create or load the DeepSpeed configuration to be used as a master configuration</li> <li>Create the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> object based on these values</li></ol> <p>Do note that some values, such as <code>scheduler.params.total_num_steps</code> are calculated by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> during <code>train</code>, but you can of course do the math yourself.</p> <a id="deepspeed-zero"></a> <h3 class="relative group"><a id="zero" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO </span></h3> <p><a href="https://www.deepspeed.ai/tutorials/zero/" rel="nofollow">Zero Redundancy Optimizer (ZeRO)</a> is the workhorse of DeepSpeed. It supports 3 different levels (stages) of optimization. The first one is not quite interesting for scalability purposes, therefore this document focuses on stages 2 and 3. Stage 3 is further improved by the latest addition of ZeRO-Infinity. You will find more in-depth information in the DeepSpeed documentation.</p> <p>The <code>zero_optimization</code> section of the configuration file is the most important part (<a href="https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training" rel="nofollow">docs</a>), since that is where you define which ZeRO stages you want to enable and how to configure them. You will find the explanation for each parameter in the DeepSpeed docs.</p> <p>This section has to be configured exclusively via DeepSpeed configuration - the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> provides no equivalent command line arguments.</p> <p>Note: currently DeepSpeed doesn’t validate parameter names, so if you misspell any, it’ll use the default setting for the parameter that got misspelled. 
You can watch the DeepSpeed engine start up log messages to see what values it is going to use.</p> <a id="deepspeed-zero2-config"></a> <h4 class="relative group"><a id="zero2-config" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero2-config"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-2 Config </span></h4> <p>The following is an example of configuration for ZeRO stage 2:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> 
<div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">5e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span 
class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">5e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p><strong>Performance tuning:</strong></p> <ul><li>enabling <code>offload_optimizer</code> should reduce GPU RAM usage (it requires <code>&quot;stage&quot;: 2</code>)</li> <li><code>&quot;overlap_comm&quot;: true</code> trades off increased GPU RAM usage to lower all-reduce latency. <code>overlap_comm</code> uses 4.5x the <code>allgather_bucket_size</code> and <code>reduce_bucket_size</code> values. So if they are set to 5e8, this requires a 9GB footprint (<code>5e8 x 2Bytes x 2 x 4.5</code>). Therefore, if you have a GPU with 8GB or less RAM, to avoid getting OOM-errors you will need to reduce those parameters to about <code>2e8</code>, which would require 3.6GB. You will want to do the same on larger capacity GPU as well, if you’re starting to hit OOM.</li> <li>when reducing these buffers you’re trading communication speed to avail more GPU RAM. The smaller the buffer size is, the slower the communication gets, and the more GPU RAM will be available to other tasks. 
So if a bigger batch size is important, getting a slightly slower training time could be a good trade.</li></ul> <p>Additionally, <code>deepspeed==0.4.4</code> added a new option <code>round_robin_gradients</code> which you can enable with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;round_robin_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>This is a stage 2 optimization for CPU offloading that parallelizes gradient copying to CPU memory among ranks by 
fine-grained gradient partitioning. Performance benefit grows with gradient accumulation steps (more copying between optimizer steps) or GPU count (increased parallelism).</p> <a id="deepspeed-zero3-config"></a> <h4 class="relative group"><a id="zero3-config" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero3-config"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-3 Config </span></h4> <p>The following is an example of configuration for ZeRO stage 3:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_param&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;sub_group_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_prefetch_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_param_persistence_threshold&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_live_parameters&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_reuse_distance&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>If you are getting OOMs, because your model or activations don’t fit into the GPU memory and you have unutilized CPU memory offloading the optimizer states and parameters to CPU memory with <code>&quot;device&quot;: &quot;cpu&quot;</code> may solve this limitation. 
If you don’t want to offload to CPU memory, use <code>none</code> instead of <code>cpu</code> for the <code>device</code> entry. Offloading to NVMe is discussed further down.</p> <p>Pinned memory is enabled with <code>pin_memory</code> set to <code>true</code>. This feature can improve the throughput at the cost of making less memory available to other processes. Pinned memory is set aside to the specific process that requested it and its typically accessed much faster than normal CPU memory.</p> <p><strong>Performance tuning:</strong></p> <ul><li><code>stage3_max_live_parameters</code>: <code>1e9</code></li> <li><code>stage3_max_reuse_distance</code>: <code>1e9</code></li></ul> <p>If hitting OOM reduce <code>stage3_max_live_parameters</code> and <code>stage3_max_reuse_distance</code>. They should have minimal impact on performance unless you are doing activation checkpointing. <code>1e9</code> would consume ~2GB. The memory is shared by <code>stage3_max_live_parameters</code> and <code>stage3_max_reuse_distance</code>, so it’s not additive, it’s just 2GB total.</p> <p><code>stage3_max_live_parameters</code> is the upper limit on how many full parameters you want to keep on the GPU at any given time. “reuse distance” is a metric we are using to figure out when will a parameter be used again in the future, and we use the <code>stage3_max_reuse_distance</code> to decide whether to throw away the parameter or to keep it. If a parameter is going to be used again in near future (less than <code>stage3_max_reuse_distance</code>) then we keep it to reduce communication overhead. 
This is super helpful when you have activation checkpointing enabled, where we do a forward recompute and backward passes a a single layer granularity and want to keep the parameter in the forward recompute till the backward</p> <p>The following configuration values depend on the model’s hidden size:</p> <ul><li><code>reduce_bucket_size</code>: <code>hidden_size*hidden_size</code></li> <li><code>stage3_prefetch_bucket_size</code>: <code>0.9 * hidden_size * hidden_size</code></li> <li><code>stage3_param_persistence_threshold</code>: <code>10 * hidden_size</code></li></ul> <p>therefore set these values to <code>auto</code> and the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will automatically assign the recommended values. But, of course, feel free to set these explicitly as well.</p> <p><code>stage3_gather_16bit_weights_on_model_save</code> enables model fp16 weights consolidation when model gets saved. With large models and multiple GPUs this is an expensive operation both in terms of memory and speed. It’s currently required if you plan to resume the training. Watch out for future updates that will remove this limitation and make things more flexible.</p> <p>If you’re migrating from ZeRO-2 configuration note that <code>allgather_partitions</code>, <code>allgather_bucket_size</code> and <code>reduce_scatter</code> configuration parameters are not used in ZeRO-3. If you keep these in the config file they will just be ignored.</p> <ul><li><code>sub_group_size</code>: <code>1e9</code></li></ul> <p><code>sub_group_size</code> controls the granularity in which parameters are updated during optimizer steps. Parameters are grouped into buckets of <code>sub_group_size</code> and each buckets is updated one at a time. When used with NVMe offload in ZeRO-Infinity, <code>sub_group_size</code> therefore controls the granularity in which model states are moved in and out of CPU memory from NVMe during the optimizer step. 
This prevents running out of CPU memory for extremely large models.</p> <p>You can leave <code>sub_group_size</code> to its default value of <em>1e9</em> when not using NVMe offload. You may want to change its default value in the following cases:</p> <ol><li>Running into OOM during optimizer step: Reduce <code>sub_group_size</code> to reduce memory utilization of temporary buffers</li> <li>Optimizer Step is taking a long time: Increase <code>sub_group_size</code> to improve bandwidth utilization as a result of the increased data buffers.</li></ol> <a id="deepspeed-nvme"></a> <h3 class="relative group"><a id="nvme-support" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#nvme-support"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>NVMe Support </span></h3> <p>ZeRO-Infinity allows for training incredibly large models by extending GPU and CPU memory with NVMe memory. Thanks to smart partitioning and tiling algorithms each GPU needs to send and receive very small amounts of data during offloading so modern NVMe proved to be fit to allow for an even larger total memory pool available to your training process. 
ZeRO-Infinity requires ZeRO-3 enabled.</p> <p>The following configuration example enables NVMe to offload both optimizer states and the params:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;nvme&quot;</span><span 
class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;nvme_path&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;/local_nvme&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;buffer_count&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">4</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;fast_init&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_param&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;nvme&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;nvme_path&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;/local_nvme&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;buffer_count&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;buffer_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;max_in_cpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;aio&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;block_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">262144</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;queue_depth&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">32</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;thread_count&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;single_submit&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_events&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;sub_group_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_prefetch_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;stage3_param_persistence_threshold&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_live_parameters&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_reuse_distance&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>You can choose to offload both optimizer states and params to NVMe, or just one of them or none. For example, if you have copious amounts of CPU memory available, by all means offload to CPU memory only as it’d be faster (hint: <em>“device”: “cpu”</em>).</p> <p>Here is the full documentation for offloading <a href="https://www.deepspeed.ai/docs/config-json/#optimizer-offloading" rel="nofollow">optimizer states</a> and <a href="https://www.deepspeed.ai/docs/config-json/#parameter-offloading" rel="nofollow">parameters</a>.</p> <p>Make sure that your <code>nvme_path</code> is actually an NVMe, since it will work with the normal hard drive or SSD, but it’ll be much much slower. 
The fast scalable training was designed with modern NVMe transfer speeds in mind (as of this writing one can have ~3.5GB/s read, ~3GB/s write peak speeds).</p> <p>In order to figure out the optimal <code>aio</code> configuration block you must run a benchmark on your target setup, as <a href="https://github.com/microsoft/DeepSpeed/issues/998" rel="nofollow">explained here</a>.</p> <a id="deepspeed-zero2-zero3-performance"></a> <h4 class="relative group"><a id="zero2-vs-zero3-performance" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero2-vs-zero3-performance"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-2 vs ZeRO-3 Performance </span></h4> <p>ZeRO-3 is likely to be slower than ZeRO-2 if everything else is configured the same because the former has to gather model weights in addition to what ZeRO-2 does. If ZeRO-2 meets your needs and you don’t need to scale beyond a few GPUs then you may choose to stick to it. 
It’s important to understand that ZeRO-3 enables a much higher scalability capacity at a cost of speed.</p> <p>It’s possible to adjust ZeRO-3 configuration to make it perform closer to ZeRO-2:</p> <ul><li>set <code>stage3_param_persistence_threshold</code> to a very large number - larger than the largest parameter, e.g., <code>6 * hidden_size * hidden_size</code>. This will keep the parameters on the GPUs.</li> <li>turn off <code>offload_params</code> since ZeRO-2 doesn’t have that option.</li></ul> <p>The performance will likely improve significantly with just <code>offload_params</code> turned off, even if you don’t change <code>stage3_param_persistence_threshold</code>. Of course, these changes will impact the size of the model you can train. So these help you to trade scalability for speed depending on your needs.</p> <a id="deepspeed-zero2-example"></a> <h4 class="relative group"><a id="zero2-example" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero2-example"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-2 Example </span></h4> <p>Here is a full ZeRO-2 auto-configuration file 
<code>ds_config_zero2.json</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span 
class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;steps_per_print&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;wall_clock_breakdown&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>Here is a full ZeRO-2 all-enabled manually set configuration file. 
It is here mainly for you to see what the typical values look like, but we highly recommend using the one with multiple <code>auto</code> settings in it.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span 
class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">[</span><span class="hljs-number">0.8</span><span class="hljs-punctuation">,</span> <span class="hljs-number">0.999</span><span class="hljs-punctuation">]</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e-8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-7</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">500</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;allgather_partitions&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;allgather_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_scatter&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2e8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;steps_per_print&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;wall_clock_breakdown&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <a id="deepspeed-zero3-example"></a> <h4 class="relative group"><a id="zero3-example" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero3-example"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-3 Example </span></h4> <p>Here is a full ZeRO-3 auto-configuration file <code>ds_config_zero3.json</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span 
class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_param&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;sub_group_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_prefetch_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_param_persistence_threshold&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_live_parameters&quot;</span><span class="hljs-punctuation">:</span> <span 
class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_reuse_distance&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;steps_per_print&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;wall_clock_breakdown&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>Here is a full ZeRO-3 all-enabled manually set configuration file. 
It is here mainly for you to see what the typical values look like, but we highly recommend using the one with multiple <code>auto</code> settings in it.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span 
class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">[</span><span class="hljs-number">0.8</span><span class="hljs-punctuation">,</span> <span class="hljs-number">0.999</span><span class="hljs-punctuation">]</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e-8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-7</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-5</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">500</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;offload_param&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span 
class="hljs-attr">&quot;device&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;cpu&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;pin_memory&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;overlap_comm&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;contiguous_gradients&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;sub_group_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;reduce_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e6</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_prefetch_bucket_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0.94e6</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_param_persistence_threshold&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e4</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_live_parameters&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_max_reuse_distance&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e9</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> 
<span class="hljs-punctuation">}</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;steps_per_print&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;wall_clock_breakdown&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">false</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="optimizer-and-scheduler" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#optimizer-and-scheduler"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Optimizer and Scheduler </span></h3> <p>As long as you don’t enable <code>offload_optimizer</code> you can mix and match DeepSpeed and HuggingFace schedulers and optimizers, with the exception of using the combination of HuggingFace scheduler and DeepSpeed optimizer:</p> <p>| Combos | HF Scheduler | DS Scheduler | | HF Optimizer | Yes | Yes | | DS Optimizer | No | Yes |</p> <p>It is possible to use a non-DeepSpeed optimizer when <code>offload_optimizer</code> is enabled, as long as it 
has both CPU and GPU implementation (except LAMB).</p> <a id="deepspeed-optimizer"></a> <h4 class="relative group"><a id="optimizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#optimizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Optimizer </span></h4> <p>DeepSpeed’s main optimizers are Adam, AdamW, OneBitAdam, and Lamb. These have been thoroughly tested with ZeRO and are thus recommended to be used. It, however, can import other optimizers from <code>torch</code>. 
The full documentation is <a href="https://www.deepspeed.ai/docs/config-json/#optimizer-parameters" rel="nofollow">here</a>.</p> <p>If you don’t configure the <code>optimizer</code> entry in the configuration file, the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will automatically set it to <code>AdamW</code> and will use the supplied values or the defaults for the following command line arguments: <code>--learning_rate</code>, <code>--adam_beta1</code>, <code>--adam_beta2</code>, <code>--adam_epsilon</code> and <code>--weight_decay</code>.</p> <p>Here is an example of the auto-configured <code>optimizer</code> entry for <code>AdamW</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span 
class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>Note that the command line arguments will set the values in the configuration file. This is so that there is one definitive source of the values and to avoid hard to find errors when for example, the learning rate is set to different values in different places. Command line rules. 
The values that get overridden are:</p> <ul><li><code>lr</code> with the value of <code>--learning_rate</code></li> <li><code>betas</code> with the value of <code>--adam_beta1 --adam_beta2</code></li> <li><code>eps</code> with the value of <code>--adam_epsilon</code></li> <li><code>weight_decay</code> with the value of <code>--weight_decay</code></li></ul> <p>Therefore please remember to tune the shared hyperparameters on the command line.</p> <p>You can also set the values explicitly:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span 
class="hljs-string">&quot;AdamW&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0.001</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;betas&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">[</span><span class="hljs-number">0.8</span><span class="hljs-punctuation">,</span> <span class="hljs-number">0.999</span><span class="hljs-punctuation">]</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;eps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1e-8</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;weight_decay&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3e-7</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>But then you’re on your own synchronizing the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments and the DeepSpeed configuration.</p> <p>If you want to use another optimizer which is not listed above, you will have to add to the top level configuration.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_allow_untested_optimizer&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>Similarly to <code>AdamW</code>, you can configure other officially supported optimizers. Just remember that may have different config values. e.g. 
for Adam you will want <code>weight_decay</code> around <code>0.01</code>.</p> <a id="deepspeed-scheduler"></a> <h4 class="relative group"><a id="scheduler" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#scheduler"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Scheduler </span></h4> <p>DeepSpeed supports <code>LRRangeTest</code>, <code>OneCycle</code>, <code>WarmupLR</code> and <code>WarmupDecayLR</code> learning rate schedulers. The full documentation is <a href="https://www.deepspeed.ai/docs/config-json/#scheduler-parameters" rel="nofollow">here</a>.</p> <p>Here is where the schedulers overlap between 🤗 Transformers and DeepSpeed:</p> <ul><li><code>WarmupLR</code> via <code>--lr_scheduler_type constant_with_warmup</code></li> <li><code>WarmupDecayLR</code> via <code>--lr_scheduler_type linear</code>. 
This is also the default value for <code>--lr_scheduler_type</code>, therefore, if you don’t configure the scheduler this is scheduler that will get configured by default.</li></ul> <p>If you don’t configure the <code>scheduler</code> entry in the configuration file, the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will use the values of <code>--lr_scheduler_type</code>, <code>--learning_rate</code> and <code>--warmup_steps</code> or <code>--warmup_ratio</code> to configure a 🤗 Transformers version of it.</p> <p>Here is an example of the auto-configured <code>scheduler</code> entry for <code>WarmupLR</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> 
<span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>Since <em>“auto”</em> is used the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> arguments will set the correct values in the configuration file. This is so that there is one definitive source of the values and to avoid hard to find errors when, for example, the learning rate is set to different values in different places. Command line rules. The values that get set are:</p> <ul><li><code>warmup_min_lr</code> with the value of <code>0</code>.</li> <li><code>warmup_max_lr</code> with the value of <code>--learning_rate</code>.</li> <li><code>warmup_num_steps</code> with the value of <code>--warmup_steps</code> if provided. 
Otherwise will use <code>--warmup_ratio</code> multiplied by the number of training steps and rounded up.</li> <li><code>total_num_steps</code> with either the value of <code>--max_steps</code> or if it is not provided, derived automatically at run time based on the environment and the size of the dataset and other command line arguments (needed for <code>WarmupDecayLR</code>).</li></ul> <p>You can, of course, take over any or all of the configuration values and set those yourself:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span 
class="hljs-string">&quot;WarmupLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0.001</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>But then you’re on your own synchronizing the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments and the DeepSpeed configuration.</p> <p>For example, for <code>WarmupDecayLR</code>, you can use the following entry:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded 
font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;scheduler&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;type&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;WarmupDecayLR&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;params&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;last_batch_iteration&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">-1</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;total_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_min_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_max_lr&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;warmup_num_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>and <code>total_num_steps</code>, <code>warmup_max_lr</code>, <code>warmup_num_steps</code> and <code>total_num_steps</code> will be set 
at loading time.</p> <a id="deepspeed-fp32"></a> <h3 class="relative group"><a id="fp32-precision" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#fp32-precision"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>fp32 Precision </span></h3> <p>Deepspeed supports the full fp32 and the fp16 mixed precision.</p> <p>Because of the much reduced memory needs and faster speed one gets with the fp16 mixed precision, the only time you will want to not use it is when the model you’re using doesn’t behave well under this training mode. Typically this happens when the model wasn’t pretrained in the fp16 mixed precision (e.g. often this happens with bf16-pretrained models). Such models may overflow or underflow leading to <code>NaN</code> loss. 
If this is your case then you will want to use the full fp32 mode, by explicitly disabling the otherwise default fp16 mixed precision mode with:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;false&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>If you’re using the Ampere-architecture based GPU, pytorch version 1.7 and higher will automatically switch to using the much more efficient tf32 format for some operations, but the results 
will still be in fp32. For details and benchmarks, please, see <a href="https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" rel="nofollow">TensorFloat-32(TF32) on Ampere devices</a>. The document includes instructions on how to disable this automatic conversion if for some reason you prefer not to use it.</p> <p>With the 🤗 Trainer you can use <code>--tf32</code> to enable it, or disable it with <code>--tf32 0</code> or <code>--no_tf32</code>. By default the PyTorch default is used.</p> <a id="deepspeed-amp"></a> <h3 class="relative group"><a id="automatic-mixed-precision" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#automatic-mixed-precision"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Automatic Mixed Precision </span></h3> <p>You can use automatic mixed precision with either a pytorch-like AMP way or the apex-like way:</p> <h3 class="relative group"><a id="fp16" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#fp16"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>fp16 </span></h3> <p>To configure pytorch AMP-like mode with fp16 (float16) set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> 
Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>and the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will automatically enable or disable it based on the value of <code>args.fp16_backend</code>. 
The rest of config values are up to you.</p> <p>This mode gets enabled when <code>--fp16 --fp16_backend amp</code> or <code>--fp16_full_eval</code> command line args are passed.</p> <p>You can also enable/disable this mode explicitly:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span 
class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>But then you’re on your own synchronizing the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments and the DeepSpeed configuration.</p> <p>Here is the <a href="https://www.deepspeed.ai/docs/config-json/#fp16-training-options" rel="nofollow">documentation</a>.</p> <h3 class="relative group"><a id="bf16" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#bf16"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>bf16 </span></h3> <p>If bf16 (bfloat16) is desired instead of fp16 then the following configuration section is to be used:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;bf16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>bf16 has the same dynamic range as fp32 and thus doesn’t require loss scaling.</p> <p>This mode gets enabled when <code>--bf16</code> or 
<code>--bf16_full_eval</code> command line args are passed.</p> <p>You can also enable/disable this mode explicitly:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;bf16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>As of <code>deepspeed==0.6.0</code> 
the bf16 support is new and experimental.</p> <p>If you use <a href="#gradient-accumulation">gradient accumulation</a> with bf16-enabled, you need to be aware that it’ll accumulate gradients in bf16, which may not be what you want due to this format’s low precision, as it may lead to a lossy accumulation.</p></div> <h3 class="relative group"><a id="apex" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#apex"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>apex </span></h3> <p>To configure apex AMP-like mode set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-attr">&quot;amp&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;opt_level&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>and the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will automatically configure it based on the values of <code>args.fp16_backend</code> and <code>args.fp16_opt_level</code>.</p> <p>This mode gets enabled when <code>--fp16 --fp16_backend apex --fp16_opt_level 01</code> command line args are passed.</p> <p>You can also configure this mode explicitly:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;amp&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;opt_level&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;O1&quot;</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>But then you’re on your own synchronizing the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments and the DeepSpeed configuration.</p> <p>Here is the <a href="https://www.deepspeed.ai/docs/config-json/#automatic-mixed-precision-amp-training-options" rel="nofollow">documentation</a>.</p> <a id="deepspeed-bs"></a> <h3 class="relative group"><a id="batch-size" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#batch-size"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Batch Size </span></h3> <p>To configure batch size, use:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span 
class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>and the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will automatically set <code>train_micro_batch_size_per_gpu</code> to the value of <code>args.per_device_train_batch_size</code> and <code>train_batch_size</code> to <code>args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps</code>.</p> <p>You can also set the values explicitly:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: 
transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;train_batch_size&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">12</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;train_micro_batch_size_per_gpu&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">4</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>But then you’re on your own synchronizing the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments and the DeepSpeed configuration.</p> <a id="deepspeed-grad-acc"></a> <h3 class="relative group"><a id="gradient-accumulation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#gradient-accumulation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Gradient Accumulation </span></h3> <p>To configure gradient accumulation set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>and the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will automatically set it to the value of <code>args.gradient_accumulation_steps</code>.</p> <p>You can also set the value explicitly:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;gradient_accumulation_steps&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">3</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>But then you’re on your own synchronizing the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments and the DeepSpeed configuration.</p> <a id="deepspeed-grad-clip"></a> <h3 class="relative group"><a id="gradient-clipping" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#gradient-clipping"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 
0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Gradient Clipping </span></h3> <p>To configure gradient gradient clipping set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>and the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will automatically set it to the 
value of <code>args.max_grad_norm</code>.</p> <p>You can also set the value explicitly:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;gradient_clipping&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1.0</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>But then you’re on your own synchronizing the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> command line arguments and the DeepSpeed configuration.</p> <a id="deepspeed-weight-extraction"></a> <h3 class="relative group"><a id="getting-the-model-weights-out" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#getting-the-model-weights-out"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Getting The Model Weights Out </span></h3> <p>As long as you continue training and resuming using DeepSpeed you don’t need to worry about anything. DeepSpeed stores fp32 master weights in its custom checkpoint optimizer files, which are <code>global_step*/*optim_states.pt</code> (this is glob pattern), and are saved under the normal checkpoint.</p> <p><strong>FP16 Weights:</strong></p> <p>When a model is saved under ZeRO-2, you end up having the normal <code>pytorch_model.bin</code> file with the model weights, but they are only the fp16 version of the weights.</p> <p>Under ZeRO-3, things are much more complicated, since the model weights are partitioned out over multiple GPUs, therefore <code>&quot;stage3_gather_16bit_weights_on_model_save&quot;: true</code> is required to get the <code>Trainer</code> to save the fp16 version of the weights. If this setting is <code>False</code> <code>pytorch_model.bin</code> won’t be created. This is because by default DeepSpeed’s <code>state_dict</code> contains a placeholder and not the real weights. 
If we were to save this <code>state_dict</code> it won’t be possible to load it back.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;zero_optimization&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;stage3_gather_16bit_weights_on_model_save&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-keyword">true</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p><strong>FP32 Weights:</strong></p> <p>While the fp16 weights are fine for resuming training, if you finished finetuning your model and want to upload it to the <a href="https://huggingface.co/models" rel="nofollow">models hub</a> or pass it to someone 
else you most likely will want to get the fp32 weights. This ideally shouldn’t be done during training since this is a process that requires a lot of memory, and therefore best to be performed offline after the training is complete. But if desired and you have plenty of free CPU memory it can be done in the same training script. The following sections will discuss both approaches.</p> <p><strong>Live FP32 Weights Recovery:</strong></p> <p>This approach may not work if you model is large and you have little free CPU memory left, at the end of the training.</p> <p>If you have saved at least one checkpoint, and you want to use the latest one, you can do the following:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.trainer_utils <span class="hljs-keyword">import</span> 
get_last_checkpoint <span class="hljs-keyword">from</span> deepspeed.utils.zero_to_fp32 <span class="hljs-keyword">import</span> load_state_dict_from_zero_checkpoint checkpoint_dir = get_last_checkpoint(trainer.args.output_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)<!-- HTML_TAG_END --></pre></div> <p>If you’re using the <code>--load_best_model_at_end</code> class:<em>~transformers.TrainingArguments</em> argument (to track the best checkpoint), then you can finish the training by first saving the final model explicitly and then do the same as above:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> deepspeed.utils.zero_to_fp32 <span class="hljs-keyword">import</span> load_state_dict_from_zero_checkpoint checkpoint_dir = os.path.join(trainer.args.output_dir, 
<span class="hljs-string">&quot;checkpoint-final&quot;</span>) trainer.deepspeed.save_checkpoint(checkpoint_dir) fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Note, that once <code>load_state_dict_from_zero_checkpoint</code> was run, the <code>model</code> will no longer be useable in the DeepSpeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since <code>model.load_state_dict(state_dict)</code> will remove all the DeepSpeed magic from it. So do this only at the very end of the training.</p></div> <p>Of course, you don’t have to use class:<em>~transformers.Trainer</em> and you can adjust the examples above to your own trainer.</p> <p>If for some reason you want more refinement, you can also extract the fp32 <code>state_dict</code> of the weights and apply these yourself as is shown in the following example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white 
py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> deepspeed.utils.zero_to_fp32 <span class="hljs-keyword">import</span> get_fp32_state_dict_from_zero_checkpoint state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) <span class="hljs-comment"># already on cpu</span> model = model.cpu() model.load_state_dict(state_dict)<!-- HTML_TAG_END --></pre></div> <p><strong>Offline FP32 Weights Recovery:</strong></p> <p>DeepSpeed creates a special conversion script <code>zero_to_fp32.py</code> which it places in the top-level of the checkpoint folder. Using this script you can extract the weights at any point. The script is standalone and you no longer need to have the configuration file or a <code>Trainer</code> to do the extraction.</p> <p>Let’s say your checkpoint folder looks like this:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 
px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ <span class="hljs-built_in">ls</span> -l output_dir/checkpoint-1/ -rw-rw-r-- 1 stas stas 1.4K Mar 27 20:42 config.json drwxrwxr-x 2 stas stas 4.0K Mar 25 19:52 global_step1/ -rw-rw-r-- 1 stas stas 12 Mar 27 13:16 latest -rw-rw-r-- 1 stas stas 827K Mar 27 20:42 optimizer.pt -rw-rw-r-- 1 stas stas 231M Mar 27 20:42 pytorch_model.bin -rw-rw-r-- 1 stas stas 623 Mar 27 20:42 scheduler.pt -rw-rw-r-- 1 stas stas 1.8K Mar 27 20:42 special_tokens_map.json -rw-rw-r-- 1 stas stas 774K Mar 27 20:42 spiece.model -rw-rw-r-- 1 stas stas 1.9K Mar 27 20:42 tokenizer_config.json -rw-rw-r-- 1 stas stas 339 Mar 27 20:42 trainer_state.json -rw-rw-r-- 1 stas stas 2.3K Mar 27 20:42 training_args.bin -rwxrw-r-- 1 stas stas 5.5K Mar 27 13:16 zero_to_fp32.py*<!-- HTML_TAG_END --></pre></div> <p>In this example there is just one DeepSpeed checkpoint sub-folder <em>global_step1</em>. 
Therefore to reconstruct the fp32 weights just run:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python zero_to_fp32.py . pytorch_model.bin<!-- HTML_TAG_END --></pre></div> <p>This is it. 
<code>pytorch_model.bin</code> will now contain the full fp32 model weights consolidated from multiple GPUs.</p> <p>The script will automatically be able to handle either a ZeRO-2 or ZeRO-3 checkpoint.</p> <p><code>python zero_to_fp32.py -h</code> will give you usage details.</p> <p>The script will auto-discover the deepspeed sub-folder using the contents of the file <code>latest</code>, which in the current example will contain <code>global_step1</code>.</p> <p>Note: currently the script requires 2x general RAM of the final fp32 model weights.</p> <h3 class="relative group"><a id="zero3-and-infinity-nuances" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero3-and-infinity-nuances"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO-3 and Infinity Nuances </span></h3> <p>ZeRO-3 is quite different from ZeRO-2 because of its param sharding feature.</p> <p>ZeRO-Infinity further extends ZeRO-3 to support NVMe memory and multiple other speed and scalability improvements.</p> <p>While all the efforts were made for things to just work without needing any special changes to your models, in certain circumstances you may find the 
following information to be needed.</p> <h4 class="relative group"><a id="constructing-massive-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#constructing-massive-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Constructing Massive Models </span></h4> <p>DeepSpeed/ZeRO-3 can handle models with Trillions of parameters which may not fit onto the existing RAM. 
In such cases, but also if you want the initialization to happen much faster, initialize the model using <em>deepspeed.zero.Init()</em> context manager (which is also a function decorator), like so:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5ForConditionalGeneration, T5Config <span class="hljs-keyword">import</span> deepspeed <span class="hljs-keyword">with</span> deepspeed.zero.Init(): config = T5Config.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>) model = T5ForConditionalGeneration(config)<!-- HTML_TAG_END --></pre></div> <p>As you can see this gives you a randomly initialized model.</p> <p>If you want to use a pretrained model, <code>model_class.from_pretrained</code> will activate this feature as long as 
<code>is_deepspeed_zero3_enabled()</code> returns <code>True</code>, which currently is setup by the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> object if the passed DeepSpeed configuration file contains ZeRO-3 config section. Thus you must create the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> object <strong>before</strong> calling <code>from_pretrained</code>. Here is an example of a possible sequence:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel, Trainer, TrainingArguments training_args = TrainingArguments(..., deepspeed=ds_config) model = AutoModel.from_pretrained(<span 
class="hljs-string">&quot;t5-small&quot;</span>) trainer = Trainer(model=model, args=training_args, ...)<!-- HTML_TAG_END --></pre></div> <p>If you’re using the official example scripts and your command line arguments include <code>--deepspeed ds_config.json</code> with ZeRO-3 config enabled, then everything is already done for you, since this is how example scripts are written.</p> <p>Note: If the fp16 weights of the model can’t fit onto the memory of a single GPU this feature must be used.</p> <p>For full details on this method and other related features please refer to <a href="https://deepspeed.readthedocs.io/en/latest/zero3.html#constructing-massive-models" rel="nofollow">Constructing Massive Models</a>.</p> <p>Also when loading fp16-pretrained models, you will want to tell <code>from_pretrained</code> to use <code>torch_dtype=torch.float16</code>. For details, please, see <a href="#from_pretrained-torch-dtype">from_pretrained-torch-dtype</a>.</p> <h4 class="relative group"><a id="gathering-parameters" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#gathering-parameters"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span>Gathering Parameters </span></h4> <p>Under ZeRO-3 on multiple GPUs no single GPU has all the parameters unless it’s the parameters for the currently executing layer. So if you need to access all parameters from all layers at once there is a specific method to do it. Most likely you won’t need it, but if you do please refer to <a href="https://deepspeed.readthedocs.io/en/latest/zero3.html#manual-parameter-coordination" rel="nofollow">Gathering Parameters</a></p> <p>We do however use it internally in several places, one such example is when loading pretrained model weights in <code>from_pretrained</code>. We load one layer at a time and immediately partition it to all participating GPUs, as for very large models it won’t be possible to load it on one GPU and then spread it out to multiple GPUs, due to memory limitations.</p> <p>Also under ZeRO-3, if you write your own code and run into a model parameter weight that looks like:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 
border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->tensor([<span class="hljs-number">1.0</span>], device=<span class="hljs-string">&quot;cuda:0&quot;</span>, dtype=torch.float16, requires_grad=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>stress on <code>tensor([1.])</code>, or if you get an error where it says the parameter is of size <code>1</code>, instead of some much larger multi-dimensional shape, this means that the parameter is partitioned and what you see is a ZeRO-3 placeholder.</p> <a id="deepspeed-zero-inference"></a> <h3 class="relative group"><a id="zero-inference" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#zero-inference"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ZeRO Inference </span></h3> <p>ZeRO Inference uses the same config as ZeRO-3 Training. You just don’t need the optimizer and scheduler sections. In fact you can leave these in the config file if you want to share the same one with the training. 
They will just be ignored.</p> <p>Otherwise you just need to pass the usual <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> arguments. For example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed --num_gpus=2 your_program.py &lt;normal cl args&gt; --do_eval --deepspeed ds_config.json<!-- HTML_TAG_END --></pre></div> <p>The only important thing is that you need to use a ZeRO-3 configuration, since ZeRO-2 provides no benefit whatsoever for the inference as only ZeRO-3 performs sharding of parameters, whereas ZeRO-1 shards gradients and optimizer states.</p> <p>Here is an example of running <code>run_translation.py</code> under DeepSpeed deploying all available GPUs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button 
class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->deepspeed examples/pytorch/translation/run_translation.py \ --deepspeed tests/deepspeed/ds_config_zero3.json \ --model_name_or_path t5-small --output_dir output_dir \ --do_eval --max_eval_samples 50 --warmup_steps 50 \ --max_source_length 128 --val_max_target_length 128 \ --overwrite_output_dir --per_device_eval_batch_size 4 \ --predict_with_generate --dataset_config <span class="hljs-string">&quot;ro-en&quot;</span> --fp16 \ --source_lang en --target_lang ro --dataset_name wmt16 \ --source_prefix <span class="hljs-string">&quot;translate English to Romanian: &quot;</span><!-- HTML_TAG_END --></pre></div> <p>Since for inference there is no need for additional large memory used by the optimizer states and the gradients you should be able to fit much larger batches and/or sequence length onto the same hardware.</p> <p>Additionally DeepSpeed is currently developing a 
related product called Deepspeed-Inference which has no relationship to the ZeRO technology, but instead uses tensor parallelism to scale models that can’t fit onto a single GPU. This is a work in progress and we will provide the integration once that product is complete.</p> <h3 class="relative group"><a id="memory-requirements" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#memory-requirements"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Memory Requirements </span></h3> <p>Since Deepspeed ZeRO can offload memory to CPU (and NVMe) the framework provides utils that allow one to tell how much CPU and GPU memory will be needed depending on the number of GPUs being used.</p> <p>Let’s estimate how much memory is needed to finetune “bigscience/T0_3B” on a single GPU:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" 
fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ python -c <span class="hljs-string">&#x27;from transformers import AutoModel; \ from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \ model = AutoModel.from_pretrained(&quot;bigscience/T0_3B&quot;); \ estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=1, num_nodes=1)&#x27;</span> [...] Estimated memory needed <span class="hljs-keyword">for</span> params, optim states and gradients <span class="hljs-keyword">for</span> a: HW: Setup with 1 node, 1 GPU per node. SW: Model with 2783M total params, 65M largest layer params. 
per CPU | per GPU | Options 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=1 62.23GB | 5.43GB | offload_param=none, offload_optimizer=cpu , zero_init=0 0.37GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=1 15.56GB | 46.91GB | offload_param=none, offload_optimizer=none, zero_init=0<!-- HTML_TAG_END --></pre></div> <p>So you can fit it on a single 80GB GPU and no CPU offload, or a tiny 8GB GPU but then need ~60GB of CPU memory. (Remember this is just the memory for params, optimizer states and gradients - you will need a bit more memory for cuda kernels, activations and temps.)</p> <p>Then it’s a tradeoff of cost vs speed. It’ll be cheaper to buy/rent a smaller GPU (or less GPUs since you can use multiple GPUs with Deepspeed ZeRO. But then it’ll be slower, so even if you don’t care about how fast something will be done, the slowdown has a direct impact on the duration of using the GPU and thus bigger cost. 
So experiment and compare which works the best.</p> <p>If you have enough GPU memory make sure to disable the CPU/NVMe offload as it’ll make everything faster.</p> <p>For example, let’s repeat the same for 2 GPUs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->$ python -c <span class="hljs-string">&#x27;from transformers import AutoModel; \ from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live; \ model = AutoModel.from_pretrained(&quot;bigscience/T0_3B&quot;); \ estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=2, num_nodes=1)&#x27;</span> [...] Estimated memory needed <span class="hljs-keyword">for</span> params, optim states and gradients <span class="hljs-keyword">for</span> a: HW: Setup with 1 node, 2 GPUs per node. 
SW: Model with 2783M total params, 65M largest layer params. per CPU | per GPU | Options 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=1 70.00GB | 0.25GB | offload_param=cpu , offload_optimizer=cpu , zero_init=0 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=1 62.23GB | 2.84GB | offload_param=none, offload_optimizer=cpu , zero_init=0 0.74GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=1 31.11GB | 23.58GB | offload_param=none, offload_optimizer=none, zero_init=0 <!-- HTML_TAG_END --></pre></div> <p>So here you’d want 2x 32GB GPUs or higher without offloading to CPU.</p> <p>For full information please see <a href="https://deepspeed.readthedocs.io/en/latest/memory.html" rel="nofollow">memory estimators</a>.</p> <h3 class="relative group"><a id="filing-issues" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#filing-issues"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Filing Issues </span></h3> <p>Here is how to file an issue so that we could quickly get to the bottom of the issue and help you to unblock your work.</p> <p>In your report please always 
include:</p> <ol><li><p>the full Deepspeed config file in the report</p></li> <li><p>either the command line arguments if you were using the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> or <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> arguments if you were scripting the Trainer setup yourself. Please do not dump the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> as it has dozens of entries that are irrelevant.</p></li> <li><p>Output of:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -c <span class="hljs-string">&#x27;import torch; print(f&quot;torch: {torch.__version__}&quot;)&#x27;</span> python -c <span class="hljs-string">&#x27;import 
transformers; print(f&quot;transformers: {transformers.__version__}&quot;)&#x27;</span> python -c <span class="hljs-string">&#x27;import deepspeed; print(f&quot;deepspeed: {deepspeed.__version__}&quot;)&#x27;</span><!-- HTML_TAG_END --></pre></div></li> <li><p>If possible include a link to a Google Colab notebook that we can reproduce the problem with. You can use this <a href="https://github.com/stas00/porting/blob/master/transformers/deepspeed/DeepSpeed_on_colab_CLI.ipynb" rel="nofollow">notebook</a> as a starting point.</p></li> <li><p>Unless it’s impossible please always use a standard dataset that we can use and not something custom.</p></li> <li><p>If possible try to use one of the existing <a href="https://github.com/huggingface/transformers/tree/master/examples/pytorch" rel="nofollow">examples</a> to reproduce the problem with.</p></li></ol> <p>Things to consider:</p> <ul><li><p>Deepspeed is often not the cause of the problem.</p> <p>Some of the filed issues proved to be Deepspeed-unrelated. That is once Deepspeed was removed from the setup, the problem was still there.</p> <p>Therefore, if it’s not absolutely obvious it’s a DeepSpeed-related problem, as in you can see that there is an exception and you can see that DeepSpeed modules are involved, first re-test your setup without DeepSpeed in it. And only if the problem persists then do mentioned Deepspeed and supply all the required details.</p></li> <li><p>If it’s clear to you that the issue is in the DeepSpeed core and not the integration part, please file the Issue directly with <a href="https://github.com/microsoft/DeepSpeed/" rel="nofollow">Deepspeed</a>. 
If you aren’t sure, please do not worry, either Issue tracker will do, we will figure it out once you posted it and redirect you to another Issue tracker if need be.</p></li></ul> <h3 class="relative group"><a id="troubleshooting" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#troubleshooting"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Troubleshooting </span></h3> <h4 class="relative group"><a id="the-deepspeed-process-gets-killed-at-startup-without-a-traceback" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#the-deepspeed-process-gets-killed-at-startup-without-a-traceback"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>the <code>deepspeed</code> process gets killed at startup without a traceback </span></h4> <p>If the <code>deepspeed</code> process gets killed at launch time without a traceback, that usually means that the program tried to allocate more CPU memory than your system has or your process is allowed to allocate and the OS kernel killed that process. This is because your configuration file most likely has either <code>offload_optimizer</code> or <code>offload_param</code> or both configured to offload to <code>cpu</code>. If you have NVMe, experiment with offloading to NVMe if you’re running under ZeRO-3. Here is how you can <a href="https://deepspeed.readthedocs.io/en/latest/memory.html" rel="nofollow">estimate how much memory is needed for a specific model</a>.</p> <h4 class="relative group"><a id="training-andor-evalpredict-loss-is-nan" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#training-andor-evalpredict-loss-is-nan"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 
0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>training and/or eval/predict loss is <code>NaN</code></span></h4> <p>This often happens when one takes a model pre-trained in bf16 mixed precision mode and tries to use it under fp16 (with or without mixed precision). Most models trained on TPU and often the ones released by Google are in this category (e.g. almost all t5-based models). Here the solution is to either use fp32 or bf16 if your hardware supports it (TPU, Ampere GPUs or newer).</p> <p>The other problem may have to do with using fp16. When you configure this section:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-punctuation">{</span> <span 
class="hljs-attr">&quot;fp16&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-punctuation">{</span> <span class="hljs-attr">&quot;enabled&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-string">&quot;auto&quot;</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">0</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;loss_scale_window&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1000</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;initial_scale_power&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">16</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;hysteresis&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">2</span><span class="hljs-punctuation">,</span> <span class="hljs-attr">&quot;min_loss_scale&quot;</span><span class="hljs-punctuation">:</span> <span class="hljs-number">1</span> <span class="hljs-punctuation">}</span> <span class="hljs-punctuation">}</span><!-- HTML_TAG_END --></pre></div> <p>and you see in your log that Deepspeed reports <code>OVERFLOW!</code> as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-number">0</span>%| | <span class="hljs-number">0</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">00</span>&lt;?, ?it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">262144</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">262144</span> <span class="hljs-number">1</span>%|▌ | <span class="hljs-number">1</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">00</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">26</span>, <span class="hljs-number">2.17</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">262144</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">131072.0</span> <span class="hljs-number">1</span>%|█▏ [...] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. 
Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> <span class="hljs-number">14</span>%|████████████████▌ | <span class="hljs-number">27</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">14</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">13</span>, <span class="hljs-number">2.21</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> <span class="hljs-number">15</span>%|█████████████████▏ | <span class="hljs-number">28</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">14</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">13</span>, <span class="hljs-number">2.18</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> <span class="hljs-number">15</span>%|█████████████████▊ | <span class="hljs-number">29</span>/<span class="hljs-number">189</span> [<span class="hljs-number">00</span>:<span class="hljs-number">15</span>&lt;<span class="hljs-number">01</span>:<span class="hljs-number">13</span>, <span class="hljs-number">2.18</span>it/s] [deepscale] OVERFLOW! <span class="hljs-built_in">Rank</span> <span class="hljs-number">0</span> Skipping <span class="hljs-built_in">step</span>. 
Attempted loss scale: <span class="hljs-number">1</span>, reducing <span class="hljs-keyword">to</span> <span class="hljs-number">1</span> [...]<!-- HTML_TAG_END --></pre></div> <p>that means that the Deepspeed loss scaler can’t figure out a scaling co-efficient that overcomes loss overflow.</p> <p>(the log was massaged to be more readable here.)</p> <p>In this case you usually need to raise the value of <code>initial_scale_power</code>. Setting it to <code>&quot;initial_scale_power&quot;: 32</code> will typically resolve the problem.</p> <h3 class="relative group"><a id="notes" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#notes"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Notes </span></h3> <ul><li>DeepSpeed works with the PyTorch <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> but not TF <code>TFTrainer</code></li> <li>While DeepSpeed has a pip installable PyPI package, it is highly recommended that it gets installed from <a href="https://github.com/microsoft/deepspeed#installation" rel="nofollow">source</a> to best match your hardware and also if you need to enable 
certain features, like 1-bit Adam, which aren’t available in the pypi distribution.</li> <li>You don’t have to use the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> to use DeepSpeed with 🤗 Transformers - you can use any model with your own trainer, and you will have to adapt the latter according to <a href="https://www.deepspeed.ai/getting-started/#writing-deepspeed-models" rel="nofollow">the DeepSpeed integration instructions</a>.</li></ul> <a id="deepspeed-non-trainer-integration"></a> <h2 class="relative group"><a id="nontrainer-deepspeed-integration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#nontrainer-deepspeed-integration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Non-Trainer Deepspeed Integration </span></h2> <p>The <a href="/docs/transformers/pr_16143/en/main_classes/deepspeed#transformers.deepspeed.HfDeepSpeedConfig">HfDeepSpeedConfig</a> is used to integrate Deepspeed into the 🤗 Transformers core functionality, when <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> is not used. 
The only thing that it does is handling Deepspeed ZeRO 3 param gathering and automatically splitting the model onto multiple gpus during <code>from_pretrained</code> call. Everything else you have to do by yourself.</p> <p>When using <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> everything is automatically taken care of.</p> <p>When not using <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, to efficiently deploy DeepSpeed stage 3, you must instantiate the <a href="/docs/transformers/pr_16143/en/main_classes/deepspeed#transformers.deepspeed.HfDeepSpeedConfig">HfDeepSpeedConfig</a> object before instantiating the model.</p> <p>For example for a pretrained model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span 
class="hljs-keyword">from</span> transformers.deepspeed <span class="hljs-keyword">import</span> HfDeepSpeedConfig <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span class="hljs-keyword">import</span> deepspeed ds_config = {...} <span class="hljs-comment"># deepspeed config object or path to the file</span> <span class="hljs-comment"># must run before instantiating the model to detect zero 3</span> dschf = HfDeepSpeedConfig(ds_config) <span class="hljs-comment"># keep this object alive</span> model = AutoModel.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) engine = deepspeed.initialize(model=model, config_params=ds_config, ...)<!-- HTML_TAG_END --></pre></div> <p>or for non-pretrained model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span 
class="hljs-keyword">from</span> transformers.deepspeed <span class="hljs-keyword">import</span> HfDeepSpeedConfig <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel, AutoConfig <span class="hljs-keyword">import</span> deepspeed ds_config = {...} <span class="hljs-comment"># deepspeed config object or path to the file</span> <span class="hljs-comment"># must run before instantiating the model to detect zero 3</span> dschf = HfDeepSpeedConfig(ds_config) <span class="hljs-comment"># keep this object alive</span> config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) model = AutoModel.from_config(config) engine = deepspeed.initialize(model=model, config_params=ds_config, ...)<!-- HTML_TAG_END --></pre></div> <p>Please note that if you’re not using the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> integration, you’re completely on your own. Basically follow the documentation on the <a href="https://www.deepspeed.ai/" rel="nofollow">Deepspeed</a> website. 
Also you have to configure explicitly the config file - you can’t use <code>&quot;auto&quot;</code> values and you will have to put real values instead.</p> <h2 class="relative group"><a id="transformers.deepspeed.HfDeepSpeedConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.deepspeed.HfDeepSpeedConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>HfDeepSpeedConfig </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.deepspeed.HfDeepSpeedConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 
7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.deepspeed.</span><span class="font-semibold">HfDeepSpeedConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.deepspeed.HfDeepSpeedConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.deepspeed.HfDeepSpeedConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/deepspeed.py#L40" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config_file_or_dict<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.deepspeed.HfDeepSpeedConfig.config_file_or_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.deepspeed.HfDeepSpeedConfig.config_file_or_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config_file_or_dict</strong> (<code>Union[str, Dict]</code>) &#x2014; path to DeepSpeed config file or dict.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.</p> <p>A <code>weakref</code> of this object is stored in the module’s 
globals to be able to access the config from areas where things like the Trainer object is not available (e.g. <code>from_pretrained</code> and <code>_get_resized_embeddings</code>). Therefore it’s important that this object remains alive while the program is still running.</p> <p><a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> uses the <code>HfTrainerDeepSpeedConfig</code> subclass instead. That subclass has logic to sync the configuration with values of <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a> by replacing special placeholder values: <code>&quot;auto&quot;</code>. Without this special logic the DeepSpeed configuration is not modified in any way.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.deepspeed.HfDeepSpeedConfig.del_config_sub_tree"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 
22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>del_config_sub_tree</span></h4><!-- HTML_TAG_END --> <a id="transformers.deepspeed.HfDeepSpeedConfig.del_config_sub_tree" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.deepspeed.HfDeepSpeedConfig.del_config_sub_tree"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/deepspeed.py#L114" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ds_key_long<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">must_exist<span class="opacity-60"> = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Deletes a sub-section of the config file if it’s found.</p> <p>Unless <code>must_exist</code> is <code>True</code> the section doesn’t have to exist.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.deepspeed.HfDeepSpeedConfig.get_value"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_value</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.deepspeed.HfDeepSpeedConfig.get_value" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.deepspeed.HfDeepSpeedConfig.get_value"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/deepspeed.py#L105" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ds_key_long<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">default<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Returns the set value or <code>default</code> if no value is set</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 
pt-3 px-2.5" id="transformers.deepspeed.HfDeepSpeedConfig.is_false"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>is_false</span></h4><!-- HTML_TAG_END --> <a id="transformers.deepspeed.HfDeepSpeedConfig.is_false" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.deepspeed.HfDeepSpeedConfig.is_false"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 
28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/deepspeed.py#L146" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ds_key_long<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Returns <code>True</code>/<code>False` only if the value is set, always `False` otherwise. 
So use this method to ask the very specific question of whether the value is set to `False` (and it&#39;s not set to `True</code> or isn’t set).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.deepspeed.HfDeepSpeedConfig.is_true"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>is_true</span></h4><!-- HTML_TAG_END --> <a id="transformers.deepspeed.HfDeepSpeedConfig.is_true" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.deepspeed.HfDeepSpeedConfig.is_true"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/deepspeed.py#L137" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ds_key_long<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Returns <code>True</code>/<code>False` only if the value is set, always `False` otherwise. 
So use this method to ask the very specific question of whether the value is set to `True` (and it&#39;s not set to `False</code> or isn’t set).</p></div></div> <h3 class="relative group"><a id="custom-deepspeed-zero-inference" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#custom-deepspeed-zero-inference"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Custom DeepSpeed ZeRO Inference </span></h3> <p>Here is an example of how one could do DeepSpeed ZeRO Inference without using <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> when one can’t fit a model onto a single GPU. The solution includes using additional GPUs or/and offloading GPU memory to CPU memory.</p> <p>The important nuance to understand here is that the way ZeRO is designed you can process different inputs on different GPUs in parallel.</p> <p>The example has copious notes and is self-documenting.</p> <p>Make sure to:</p> <ol><li>disable CPU offload if you have enough GPU memory (since it slows things down)</li> <li>enable bf16 if you own an Ampere or a newer GPU to make things faster. 
If you don’t have that hardware you may enable fp16 as long as you don’t use any model that was pre-trained in bf16 mixed precision (such as most t5 models). These usually overflow in fp16 and you will see garbage as output.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment">#!/usr/bin/env python</span> <span class="hljs-comment"># This script demonstrates how to use Deepspeed ZeRO in an inference mode when one can&#x27;t fit a model</span> <span class="hljs-comment"># into a single GPU</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># 1. Use 1 GPU with CPU offload</span> <span class="hljs-comment"># 2. 
Or use multiple GPUs instead</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># First you need to install deepspeed: pip install deepspeed</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># Here we use a 3B &quot;bigscience/T0_3B&quot; model which needs about 15GB GPU RAM - so 1 largish or 2</span> <span class="hljs-comment"># small GPUs can handle it. or 1 small GPU and a lot of CPU memory.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># To use a larger model like &quot;bigscience/T0&quot; which needs about 50GB, unless you have an 80GB GPU -</span> <span class="hljs-comment"># you will need 2-4 gpus. And then you can adapt the script to handle more gpus if you want to</span> <span class="hljs-comment"># process multiple inputs at once.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># The provided deepspeed config also activates CPU memory offloading, so chances are that if you</span> <span class="hljs-comment"># have a lot of available CPU memory and you don&#x27;t mind a slowdown you should be able to load a</span> <span class="hljs-comment"># model that doesn&#x27;t normally fit into a single GPU. 
If you have enough GPU memory the program will</span> <span class="hljs-comment"># run faster if you don&#x27;t want offload to CPU - so disable that section then.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># To deploy on 1 gpu:</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># deepspeed --num_gpus 1 t0.py</span> <span class="hljs-comment"># or:</span> <span class="hljs-comment"># python -m torch.distributed.run --nproc_per_node=1 t0.py</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># To deploy on 2 gpus:</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># deepspeed --num_gpus 2 t0.py</span> <span class="hljs-comment"># or:</span> <span class="hljs-comment"># python -m torch.distributed.run --nproc_per_node=2 t0.py</span> <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoConfig, AutoModelForSeq2SeqLM <span class="hljs-keyword">from</span> transformers.deepspeed <span class="hljs-keyword">import</span> HfDeepSpeedConfig <span class="hljs-keyword">import</span> deepspeed <span class="hljs-keyword">import</span> os <span class="hljs-keyword">import</span> torch os.environ[<span class="hljs-string">&quot;TOKENIZERS_PARALLELISM&quot;</span>] = <span class="hljs-string">&quot;false&quot;</span> <span class="hljs-comment"># To avoid warnings about parallelism in tokenizers</span> <span class="hljs-comment"># distributed setup</span> local_rank = <span class="hljs-built_in">int</span>(os.getenv(<span class="hljs-string">&quot;LOCAL_RANK&quot;</span>, <span class="hljs-string">&quot;0&quot;</span>)) world_size = <span class="hljs-built_in">int</span>(os.getenv(<span class="hljs-string">&quot;WORLD_SIZE&quot;</span>, <span class="hljs-string">&quot;1&quot;</span>)) torch.cuda.set_device(local_rank) deepspeed.init_distributed() model_name = <span class="hljs-string">&quot;bigscience/T0_3B&quot;</span> config = 
AutoConfig.from_pretrained(model_name) model_hidden_size = config.d_model <span class="hljs-comment"># batch size has to be divisible by world_size, but can be bigger than world_size</span> train_batch_size = <span class="hljs-number">1</span> * world_size <span class="hljs-comment"># ds_config notes</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># - enable bf16 if you use Ampere or higher GPU - this will run in mixed precision and will be</span> <span class="hljs-comment"># faster.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># - for older GPUs you can enable fp16, but it&#x27;ll only work for non-bf16 pretrained models - e.g.</span> <span class="hljs-comment"># all official t5 models are bf16-pretrained</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># - set offload_param.device to &quot;none&quot; or completely remove the `offload_param` section if you don&#x27;t</span> <span class="hljs-comment"># - want CPU offload</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># - if using `offload_param` you can manually finetune stage3_param_persistence_threshold to control</span> <span class="hljs-comment"># - which params should remain on gpus - the larger the value the smaller the offload size</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># For indepth info on Deepspeed config see</span> <span class="hljs-comment"># https://huggingface.co/docs/transformers/master/main_classes/deepspeed</span> <span class="hljs-comment"># keeping the same format as json for consistency, except it uses lower case for true/false</span> <span class="hljs-comment"># fmt: off</span> ds_config = { <span class="hljs-string">&quot;fp16&quot;</span>: { <span class="hljs-string">&quot;enabled&quot;</span>: <span class="hljs-literal">False</span> }, <span class="hljs-string">&quot;bf16&quot;</span>: { <span class="hljs-string">&quot;enabled&quot;</span>: <span 
class="hljs-literal">False</span> }, <span class="hljs-string">&quot;zero_optimization&quot;</span>: { <span class="hljs-string">&quot;stage&quot;</span>: <span class="hljs-number">3</span>, <span class="hljs-string">&quot;offload_param&quot;</span>: { <span class="hljs-string">&quot;device&quot;</span>: <span class="hljs-string">&quot;cpu&quot;</span>, <span class="hljs-string">&quot;pin_memory&quot;</span>: <span class="hljs-literal">True</span> }, <span class="hljs-string">&quot;overlap_comm&quot;</span>: <span class="hljs-literal">True</span>, <span class="hljs-string">&quot;contiguous_gradients&quot;</span>: <span class="hljs-literal">True</span>, <span class="hljs-string">&quot;reduce_bucket_size&quot;</span>: model_hidden_size * model_hidden_size, <span class="hljs-string">&quot;stage3_prefetch_bucket_size&quot;</span>: <span class="hljs-number">0.9</span> * model_hidden_size * model_hidden_size, <span class="hljs-string">&quot;stage3_param_persistence_threshold&quot;</span>: <span class="hljs-number">10</span> * model_hidden_size }, <span class="hljs-string">&quot;steps_per_print&quot;</span>: <span class="hljs-number">2000</span>, <span class="hljs-string">&quot;train_batch_size&quot;</span>: train_batch_size, <span class="hljs-string">&quot;train_micro_batch_size_per_gpu&quot;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&quot;wall_clock_breakdown&quot;</span>: <span class="hljs-literal">False</span> } <span class="hljs-comment"># fmt: on</span> <span class="hljs-comment"># next line instructs transformers to partition the model directly over multiple gpus using</span> <span class="hljs-comment"># deepspeed.zero.Init when model&#x27;s `from_pretrained` method is called.</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># **it has to be run before loading the model AutoModelForSeq2SeqLM.from_pretrained(model_name)**</span> <span class="hljs-comment">#</span> <span class="hljs-comment"># otherwise the model will 
first be loaded normally and only partitioned at forward time which is</span> <span class="hljs-comment"># less efficient and when there is little CPU RAM may fail</span> dschf = HfDeepSpeedConfig(ds_config) <span class="hljs-comment"># keep this object alive</span> <span class="hljs-comment"># now a model can be loaded.</span> model = AutoModelForSeq2SeqLM.from_pretrained(model_name) <span class="hljs-comment"># initialise Deepspeed ZeRO and store only the engine object</span> ds_engine = deepspeed.initialize(model=model, config_params=ds_config)[<span class="hljs-number">0</span>] ds_engine.module.<span class="hljs-built_in">eval</span>() <span class="hljs-comment"># inference</span> <span class="hljs-comment"># Deepspeed ZeRO can process unrelated inputs on each GPU. So for 2 gpus you process 2 inputs at once.</span> <span class="hljs-comment"># If you use more GPUs adjust for more.</span> <span class="hljs-comment"># And of course if you have just one input to process you then need to pass the same string to both gpus</span> <span class="hljs-comment"># If you use only one GPU, then you will have only rank 0.</span> rank = torch.distributed.get_rank() <span class="hljs-keyword">if</span> rank == <span class="hljs-number">0</span>: text_in = <span class="hljs-string">&quot;Is this review positive or negative? Review: this is the best cast iron skillet you will ever buy&quot;</span> <span class="hljs-keyword">elif</span> rank == <span class="hljs-number">1</span>: text_in = <span class="hljs-string">&quot;Is this review positive or negative? 
Review: this is the worst restaurant ever&quot;</span> tokenizer = AutoTokenizer.from_pretrained(model_name) inputs = tokenizer.encode(text_in, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>).to(device=local_rank) <span class="hljs-keyword">with</span> torch.no_grad(): outputs = ds_engine.module.generate(inputs, synced_gpus=<span class="hljs-literal">True</span>) text_out = tokenizer.decode(outputs[<span class="hljs-number">0</span>], skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;rank<span class="hljs-subst">{rank}</span>:\n in=<span class="hljs-subst">{text_in}</span>\n out=<span class="hljs-subst">{text_out}</span>&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Let’s save it as <code>t0.py</code> and run it:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> 
<pre><!-- HTML_TAG_START -->$ deepspeed --num_gpus <span class="hljs-number">2</span> t0.py rank0: <span class="hljs-keyword">in</span>=Is <span class="hljs-keyword">this</span> review positive or negative? Review: <span class="hljs-keyword">this</span> <span class="hljs-keyword">is</span> the best cast iron skillet you will ever buy <span class="hljs-keyword">out</span>=Positive rank1: <span class="hljs-keyword">in</span>=Is <span class="hljs-keyword">this</span> review positive or negative? Review: <span class="hljs-keyword">this</span> <span class="hljs-keyword">is</span> the worst restaurant ever <span class="hljs-keyword">out</span>=negative<!-- HTML_TAG_END --></pre></div> <p>This was a very basic example and you will want to adapt it to your needs.</p> <h2 class="relative group"><a id="main-deepspeed-resources" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#main-deepspeed-resources"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Main DeepSpeed Resources </span></h2> <ul><li><a href="https://github.com/microsoft/deepspeed" rel="nofollow">Project’s github</a></li> <li><a 
href="https://www.deepspeed.ai/getting-started/" rel="nofollow">Usage docs</a></li> <li><a href="https://deepspeed.readthedocs.io/en/latest/index.html" rel="nofollow">API docs</a></li> <li><a href="https://www.microsoft.com/en-us/research/search/?q=deepspeed" rel="nofollow">Blog posts</a></li></ul> <p>Papers:</p> <ul><li><a href="https://arxiv.org/abs/1910.02054" rel="nofollow">ZeRO: Memory Optimizations Toward Training Trillion Parameter Models</a></li> <li><a href="https://arxiv.org/abs/2101.06840" rel="nofollow">ZeRO-Offload: Democratizing Billion-Scale Model Training</a></li> <li><a href="https://arxiv.org/abs/2104.07857" rel="nofollow">ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning</a></li></ul> <p>Finally, please, remember that, HuggingFace <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> only integrates DeepSpeed, therefore if you have any problems or questions with regards to DeepSpeed usage, please, file an issue with <a href="https://github.com/microsoft/DeepSpeed/issues" rel="nofollow">DeepSpeed GitHub</a>.</p> <script type="module" data-hydrate="1p7zog8"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1p7zog8"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/deepspeed.mdx-f19f464c.js") ], params: {} } }); </script>
436
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/output.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;model-outputs&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.file_utils.ModelOutput&quot;,&quot;title&quot;:&quot;ModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutput&quot;,&quot;title&quot;:&quot;BaseModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutputWithPooling&quot;,&quot;title&quot;:&quot;BaseModelOutputWithPooling&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutputWithCrossAttentions&quot;,&quot;title&quot;:&quot;BaseModelOutputWithCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions&quot;,&quot;title&quot;:&quot;BaseModelOutputWithPoolingAndCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutputWithPast&quot;,&quot;title&quot;:&quot;BaseModelOutputWithPast&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions&quot;,&quot;title&quot;:&quot;BaseModelOutputWithPastAndCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.Seq2SeqModelOutput&quot;,&quot;title&quot;:&quot;Seq2SeqModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.CausalLMOutput&quot;,&quot;title&quot;:&quot;CausalLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.CausalLMOutputWithCrossAttentions&quot;,&quot;title&quot;:&quot;CausalLMOutputWithCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.CausalLMOutputWithPast&quot;,&quot;title&quot;:&quot;CausalLMOutputWithPast&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.MaskedLMOutput&quot;,&quot;title&quot;:&quot;MaskedLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.Seq2SeqLMOutput&quot;,&quot;title&quot;:&quot;Seq2
SeqLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.NextSentencePredictorOutput&quot;,&quot;title&quot;:&quot;NextSentencePredictorOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.SequenceClassifierOutput&quot;,&quot;title&quot;:&quot;SequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput&quot;,&quot;title&quot;:&quot;Seq2SeqSequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.MultipleChoiceModelOutput&quot;,&quot;title&quot;:&quot;MultipleChoiceModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.TokenClassifierOutput&quot;,&quot;title&quot;:&quot;TokenClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.QuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;QuestionAnsweringModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;Seq2SeqQuestionAnsweringModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFBaseModelOutput&quot;,&quot;title&quot;:&quot;TFBaseModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling&quot;,&quot;title&quot;:&quot;TFBaseModelOutputWithPooling&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions&quot;,&quot;title&quot;:&quot;TFBaseModelOutputWithPoolingAndCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFBaseModelOutputWithPast&quot;,&quot;title&quot;:&quot;TFBaseModelOutputWithPast&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions&quot;,&quot;title&quot;:&quot;TFBaseModelOutputWithPastAndCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFSeq2SeqModelOutput&quot;,&quot;title&quot;:&quot;TFSeq2SeqModelOutput&quot;},{
&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFCausalLMOutput&quot;,&quot;title&quot;:&quot;TFCausalLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions&quot;,&quot;title&quot;:&quot;TFCausalLMOutputWithCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFCausalLMOutputWithPast&quot;,&quot;title&quot;:&quot;TFCausalLMOutputWithPast&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFMaskedLMOutput&quot;,&quot;title&quot;:&quot;TFMaskedLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFSeq2SeqLMOutput&quot;,&quot;title&quot;:&quot;TFSeq2SeqLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFNextSentencePredictorOutput&quot;,&quot;title&quot;:&quot;TFNextSentencePredictorOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFSequenceClassifierOutput&quot;,&quot;title&quot;:&quot;TFSequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput&quot;,&quot;title&quot;:&quot;TFSeq2SeqSequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput&quot;,&quot;title&quot;:&quot;TFMultipleChoiceModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFTokenClassifierOutput&quot;,&quot;title&quot;:&quot;TFTokenClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;TFQuestionAnsweringModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;TFSeq2SeqQuestionAnsweringModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxBaseModelOutput&quot;,&quot;title&quot;:&quot;FlaxBaseModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxBaseModelO
utputWithPast&quot;,&quot;title&quot;:&quot;FlaxBaseModelOutputWithPast&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling&quot;,&quot;title&quot;:&quot;FlaxBaseModelOutputWithPooling&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions&quot;,&quot;title&quot;:&quot;FlaxBaseModelOutputWithPastAndCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput&quot;,&quot;title&quot;:&quot;FlaxSeq2SeqModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions&quot;,&quot;title&quot;:&quot;FlaxCausalLMOutputWithCrossAttentions&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxMaskedLMOutput&quot;,&quot;title&quot;:&quot;FlaxMaskedLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput&quot;,&quot;title&quot;:&quot;FlaxSeq2SeqLMOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput&quot;,&quot;title&quot;:&quot;FlaxNextSentencePredictorOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput&quot;,&quot;title&quot;:&quot;FlaxSequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput&quot;,&quot;title&quot;:&quot;FlaxSeq2SeqSequenceClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput&quot;,&quot;title&quot;:&quot;FlaxMultipleChoiceModelOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxTokenClassifierOutput&quot;,&quot;title&quot;:&quot;FlaxTokenClassifierOutput&quot;},{&quot;local&quot;:&quot;transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;FlaxQuestionAnsweringModelOutput&quot;},{&quot;local&quot;:&quot;transf
ormers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput&quot;,&quot;title&quot;:&quot;FlaxSeq2SeqQuestionAnsweringModelOutput&quot;}],&quot;title&quot;:&quot;Model outputs&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/output.mdx-bc3fe8ad.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="model-outputs" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#model-outputs"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Model outputs </span></h1> <p>All models have outputs that are instances of subclasses of <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a>. Those are data structures containing all the information returned by the model, but that can also be used as tuples or dictionaries.</p> <p>Let’s see of this looks on an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> BertTokenizer, BertForSequenceClassification <span class="hljs-keyword">import</span> torch tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) model = BertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute&quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) labels = torch.tensor([<span class="hljs-number">1</span>]).unsqueeze(<span class="hljs-number">0</span>) <span class="hljs-comment"># Batch size 1</span> outputs = model(**inputs, labels=labels)<!-- HTML_TAG_END --></pre></div> <p>The <code>outputs</code> object is a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.modeling_outputs.SequenceClassifierOutput">SequenceClassifierOutput</a>, as we can see in the documentation of that class below, it means it has an optional <code>loss</code>, a <code>logits</code> an optional <code>hidden_states</code> and an optional <code>attentions</code> attribute. Here we have the <code>loss</code> since we passed along <code>labels</code>, but we don’t have <code>hidden_states</code> and <code>attentions</code> because we didn’t pass <code>output_hidden_states=True</code> or <code>output_attentions=True</code>.</p> <p>You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get <code>None</code>. Here for instance <code>outputs.loss</code> is the loss computed by the model, and <code>outputs.attentions</code> is <code>None</code>.</p> <p>When considering our <code>outputs</code> object as tuple, it only considers the attributes that don’t have <code>None</code> values. 
Here for instance, it has two elements, <code>loss</code> then <code>logits</code>, so</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->outputs[:<span class="hljs-number">2</span>]<!-- HTML_TAG_END --></pre></div> <p>will return the tuple <code>(outputs.loss, outputs.logits)</code> for instance.</p> <p>When considering our <code>outputs</code> object as dictionary, it only considers the attributes that don’t have <code>None</code> values. Here for instance, it has two keys that are <code>loss</code> and <code>logits</code>.</p> <p>We document here the generic model outputs that are used by more than one model type. 
Specific output types are documented on their corresponding model page.</p> <h2 class="relative group"><a id="transformers.file_utils.ModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.ModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.ModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" 
fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.file_utils.</span><span class="font-semibold">ModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.file_utils.ModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.ModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2614" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base class for all model outputs as dataclass. 
Has a <code>__getitem__</code> that allows indexing by integer or slice (like a tuple) or strings (like a dictionary) that will ignore the <code>None</code> attributes. Otherwise behaves like a regular python dictionary.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>You can’t unpack a <code>ModelOutput</code> directly. Use the <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput.to_tuple">to_tuple()</a> method to convert it to a tuple before.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.ModelOutput.to_tuple"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 
7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>to_tuple</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.ModelOutput.to_tuple" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.ModelOutput.to_tuple"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2703" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Convert self to a tuple containing all the attributes/keys that are not <code>None</code>.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_outputs.BaseModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BaseModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.BaseModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path 
class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">BaseModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.BaseModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.BaseModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L24" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of 
hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutput.attentions" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs, with potential hidden states and attentions.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.BaseModelOutputWithPooling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPooling"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BaseModelOutputWithPooling </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.BaseModelOutputWithPooling"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 
1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">BaseModelOutputWithPooling</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.BaseModelOutputWithPooling" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.BaseModelOutputWithPooling"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: FloatTensor = None</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPooling.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPooling.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPooling.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPooling.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. 
The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPooling.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPooling.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_outputs.BaseModelOutputWithPooling.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPooling.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that also contains a pooling of the last hidden states.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BaseModelOutputWithCrossAttentions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" 
fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">BaseModelOutputWithCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L121" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of 
the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs, with potential hidden states and attentions.</p></div> <h2 class="relative group"><a 
id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BaseModelOutputWithPoolingAndCrossAttentions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 
0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">BaseModelOutputWithPoolingAndCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L154" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when 
<code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that also contains a pooling of the last hidden states.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.BaseModelOutputWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BaseModelOutputWithPast </span></h2> <div 
class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.BaseModelOutputWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">BaseModelOutputWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.BaseModelOutputWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.BaseModelOutputWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L82" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPast.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPast.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPast.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_outputs.BaseModelOutputWithPast.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPast.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPast.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPast.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPast.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that may also contain a past key/values (to speed up sequential decoding).</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 
0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BaseModelOutputWithPastAndCrossAttentions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span 
class="font-semibold">BaseModelOutputWithPastAndCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L203" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 
0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when 
<code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed 
or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that may also contain a past key/values (to speed up sequential decoding).</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.Seq2SeqModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Seq2SeqModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.Seq2SeqModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">Seq2SeqModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.Seq2SeqModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.Seq2SeqModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L249" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: 
typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of 
the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key 
and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 
my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model encoder’s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.CausalLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>CausalLMOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.modeling_outputs.CausalLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">CausalLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.CausalLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.CausalLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L310" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed 
or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for causal language model (or autoregressive) outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>CausalLMOutputWithCrossAttentions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.CausalLMOutputWithCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">CausalLMOutputWithCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.CausalLMOutputWithCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L375" target="_blank"><span>&lt;</span> <span 
class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 
256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, 
sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. 
Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for causal language model (or autoregressive) outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.CausalLMOutputWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>CausalLMOutputWithPast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.CausalLMOutputWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">CausalLMOutputWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.CausalLMOutputWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.CausalLMOutputWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L339" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithPast.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithPast.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithPast.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, 
<em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>)</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithPast.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of 
<code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.CausalLMOutputWithPast.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.CausalLMOutputWithPast.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- 
HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for causal language model (or autoregressive) outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.MaskedLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MaskedLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MaskedLMOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.MaskedLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 
0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">MaskedLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.MaskedLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.MaskedLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L455" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MaskedLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MaskedLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 
0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Masked language modeling (MLM) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MaskedLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MaskedLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MaskedLMOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MaskedLMOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MaskedLMOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MaskedLMOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for masked language models outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.Seq2SeqLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Seq2SeqLMOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.Seq2SeqLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span 
class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">Seq2SeqLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.Seq2SeqLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.Seq2SeqLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L484" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of 
<code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted 
average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_outputs.Seq2SeqLMOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqLMOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqLMOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for sequence-to-sequence language models outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.NextSentencePredictorOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.NextSentencePredictorOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>NextSentencePredictorOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.NextSentencePredictorOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">NextSentencePredictorOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.NextSentencePredictorOutput" class="header-link invisible with-hover:group-hover:visible 
pr-2" href="#transformers.modeling_outputs.NextSentencePredictorOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L544" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.NextSentencePredictorOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.NextSentencePredictorOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>next_sentence_label</code> is provided) &#x2014; Next sequence prediction (classification) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_outputs.NextSentencePredictorOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.NextSentencePredictorOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.NextSentencePredictorOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.NextSentencePredictorOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.NextSentencePredictorOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.NextSentencePredictorOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of models predicting if two sentences are consecutive or not.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.SequenceClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SequenceClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SequenceClassifierOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.SequenceClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">SequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.SequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.SequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L574" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters 
<span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SequenceClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SequenceClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SequenceClassifierOutput.logits"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SequenceClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SequenceClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.SequenceClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.SequenceClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sentence classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Seq2SeqSequenceClassifierOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput"><!-- HTML_TAG_START 
--><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">Seq2SeqSequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L603" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base 
!pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.cross_attentions"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqSequenceClassifierOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> 
(<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence sentence classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.MultipleChoiceModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MultipleChoiceModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MultipleChoiceModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.MultipleChoiceModelOutput"><!-- HTML_TAG_START --><h3 
class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">MultipleChoiceModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.MultipleChoiceModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.MultipleChoiceModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L663" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MultipleChoiceModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MultipleChoiceModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <em>(1,)</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MultipleChoiceModelOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MultipleChoiceModelOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. (see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MultipleChoiceModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MultipleChoiceModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when 
<code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.MultipleChoiceModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.MultipleChoiceModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> 
<p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of multiple choice models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.TokenClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.TokenClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TokenClassifierOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.TokenClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" 
height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">TokenClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.TokenClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.TokenClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L694" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.TokenClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.TokenClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.TokenClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.TokenClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>torch.FloatTensor</code> of shape 
<code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.TokenClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.TokenClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_outputs.TokenClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.TokenClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of token classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_outputs.QuestionAnsweringModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>QuestionAnsweringModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.QuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 
2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">QuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.QuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L723" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> 
</li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.QuestionAnsweringModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.QuestionAnsweringModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of question answering models.</p></div> <h2 class="relative group"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Seq2SeqQuestionAnsweringModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_outputs.</span><span class="font-semibold">Seq2SeqQuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_outputs.py#L755" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: FloatTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 
sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(torch.FloatTensor)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) 
that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 
56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_outputs.Seq2SeqQuestionAnsweringModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence question answering models.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFBaseModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFBaseModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFBaseModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFBaseModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFBaseModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFBaseModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L24" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs, with potential hidden states and attentions.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFBaseModelOutputWithPooling </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r 
rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFBaseModelOutputWithPooling</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 
0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you&#x2019;re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPooling.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 
0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that also contains a pooling of the last hidden states.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>TFBaseModelOutputWithPoolingAndCrossAttentions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFBaseModelOutputWithPoolingAndCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L84" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.</p> <p>This output is usually <em>not</em> a good summary of the semantic content of the input, you&#x2019;re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, 
embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.cross_attentions" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that also contains a pooling of the last hidden states.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFBaseModelOutputWithPast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 
2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFBaseModelOutputWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L132" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when 
<code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape 
<code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPast.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that may also contain a past key/values 
(to speed up sequential decoding).</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFBaseModelOutputWithPastAndCrossAttentions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" 
d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFBaseModelOutputWithPastAndCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L201" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that may also contain a past key/values (to speed up sequential decoding).</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFSeq2SeqModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFSeq2SeqModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L244" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, 
<em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of 
<code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention 
heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.encoder_last_hidden_state" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 
56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model encoder’s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFCausalLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFCausalLMOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFCausalLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFCausalLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFCausalLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFCausalLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L304" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 
!mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_tf_outputs.TFCausalLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 
56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for causal language model (or autoregressive) outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFCausalLMOutputWithCrossAttentions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFCausalLMOutputWithCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L369" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for causal language model (or autoregressive) outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFCausalLMOutputWithPast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFCausalLMOutputWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFCausalLMOutputWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 
0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L333" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative 
docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) &#x2014; Language modeling loss (for next-token prediction).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 
0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFCausalLMOutputWithPast.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> 
(<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for causal language model (or autoregressive) outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFMaskedLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMaskedLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFMaskedLMOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFMaskedLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 
rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFMaskedLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFMaskedLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFMaskedLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L412" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFMaskedLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_tf_outputs.TFMaskedLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>labels</code> is provided) &#x2014; Masked language modeling (MLM) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFMaskedLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMaskedLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFMaskedLMOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMaskedLMOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFMaskedLMOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMaskedLMOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the 
self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for masked language models outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFSeq2SeqLMOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 
7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFSeq2SeqLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L441" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the 
number of non-masked labels, returned when <code>labels</code> is provided) &#x2014; Language modeling loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 
0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> 
(<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> 
</li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqLMOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for sequence-to-sequence language models outputs.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFNextSentencePredictorOutput" class="header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFNextSentencePredictorOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFNextSentencePredictorOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFNextSentencePredictorOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 
1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFNextSentencePredictorOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFNextSentencePredictorOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFNextSentencePredictorOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L500" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: 
typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of non-masked labels, returned when <code>next_sentence_label</code> is provided) &#x2014; Next sentence prediction loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before 
SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.attentions" class="header-link block pr-0.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFNextSentencePredictorOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of models predicting if two sentences are consecutive or not.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFSequenceClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSequenceClassifierOutput"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFSequenceClassifierOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFSequenceClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 
1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFSequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFSequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFSequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L530" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: 
Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSequenceClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSequenceClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSequenceClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSequenceClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSequenceClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSequenceClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSequenceClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sentence classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 
8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFSeq2SeqSequenceClassifierOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFSeq2SeqSequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a 
id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L559" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: 
typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>label</code> is provided) &#x2014; Classification (or regression if config.num_labels==1) loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 
0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when 
<code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of 
<code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted 
average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence sentence classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFMultipleChoiceModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFMultipleChoiceModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput" class="header-link 
invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L611" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <em>(batch_size, )</em>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Classification loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFMultipleChoiceModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of multiple choice models.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFTokenClassifierOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFTokenClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 
1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFTokenClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L642" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(n,)</code>, <em>optional</em>, where n is the number of unmasked labels, returned when <code>labels</code> is provided) &#x2014; Classification loss.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFTokenClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFTokenClassifierOutput.attentions"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of token classification models.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TFQuestionAnsweringModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFQuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a 
id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L671" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: Tensor = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, )</code>, <em>optional</em>, returned when <code>start_positions</code> and <code>end_positions</code> are provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.end_logits" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFQuestionAnsweringModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of question answering models.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span>TFSeq2SeqQuestionAnsweringModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_outputs.</span><span class="font-semibold">TFSeq2SeqQuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_outputs.py#L703" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: Tensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.List[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span 
class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[tensorflow.python.framework.ops.Tensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[tensorflow.python.framework.ops.Tensor]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>tf.Tensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned when <code>labels</code> is provided) &#x2014; Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>List[tf.Tensor]</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; List of <code>tf.Tensor</code> of length <code>config.n_layers</code>, with each tensor of shape <code>(2, batch_size, num_heads, sequence_length, embed_size_per_head)</code>).</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 
0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(tf.Tensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>tf.Tensor</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence question answering models.</p></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxBaseModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.modeling_flax_outputs.FlaxBaseModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxBaseModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L23" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_flax_outputs.FlaxBaseModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs, with potential hidden states and attentions.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 
8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxBaseModelOutputWithPast </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" 
opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxBaseModelOutputWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L49" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: ndarray = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Union[typing.Dict[str, jax._src.numpy.lax_numpy.ndarray], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>Dict[str, jnp.ndarray]</code>) &#x2014; Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. 
Pre-computed key and value hidden-states are of shape <em>[batch_size, max_length]</em>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPast.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs, with potential hidden states and attentions.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all 
bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 
0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span>FlaxBaseModelOutputWithPooling </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxBaseModelOutputWithPooling</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L79" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pooler_output<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 
border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.pooler_output" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.pooler_output"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pooler_output</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, hidden_size)</code>) &#x2014; Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. 
The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPooling.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that also contains a pooling of the last hidden states.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 
rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxBaseModelOutputWithPastAndCrossAttentions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxBaseModelOutputWithPastAndCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L110" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of 
the last layer of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and optionally if <code>config.is_encoder_decoder=True</code> 2 additional tensors of shape <code>(batch_size, 
num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if <code>config.is_encoder_decoder=True</code> in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, 
sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex 
space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> and <code>config.add_cross_attention=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model’s outputs that may also contain a past key/values (to speed up sequential decoding).</p> <div class="docstring"><div><span class="group 
flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxSeq2SeqModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxSeq2SeqModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L156" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">last_hidden_state<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: 
typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>) &#x2014; Sequence of hidden-states at the output of the last layer of the decoder of the model.</p> <p>If <code>past_key_values</code> is used only the last hidden-state of the sequences of shape <code>(batch_size, 1, hidden_size)</code> is output.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 
8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when 
<code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex 
space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for model encoder’s outputs that also contains : pre-computed hidden states that can speed up sequential decoding.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium 
px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxCausalLMOutputWithCrossAttentions </span></h2> <div class="docstring"><div><span 
class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxCausalLMOutputWithCrossAttentions</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L217" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: 
typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> tuples of length <code>config.n_layers</code>, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if <code>config.is_decoder = True</code>.</p> <p>Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for causal language model (or autoregressive) outputs.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 
18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p 
class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMaskedLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxMaskedLMOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg 
class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxMaskedLMOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxMaskedLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm 
flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L258" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMaskedLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 
8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMaskedLMOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxMaskedLMOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMaskedLMOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape 
<code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for masked language models outputs.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxSeq2SeqLMOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxSeq2SeqLMOutput</span></span></h3><!-- 
HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L287" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span 
class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.vocab_size)</code>) &#x2014; Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 
40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 
40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 
0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, 
<em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> 
</span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_attentions" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqLMOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for sequence-to-sequence language models outputs.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium 
px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxNextSentencePredictorOutput </span></h2> <div class="docstring"><div><span class="group flex 
space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxNextSentencePredictorOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L344" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, 2)</code>) &#x2014; Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxNextSentencePredictorOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of models predicting if two sentences are consecutive or not.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 
11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new 
values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxSequenceClassifierOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 
0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxSequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L371" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> 
<span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape 
<code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSequenceClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sentence classification 
models.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxSeq2SeqSequenceClassifierOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxSeq2SeqSequenceClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L397" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.num_labels)</code>) &#x2014; Classification (or regression if config.num_labels==1) scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence 
of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence sentence classification models.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 
class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxMultipleChoiceModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxMultipleChoiceModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L454" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, num_choices)</code>) &#x2014; <em>num_choices</em> is the second dimension of the input tensors. 
(see <em>input_ids</em> above).</p> <p>Classification scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxMultipleChoiceModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of multiple choice models.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 
to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxTokenClassifierOutput </span></h2> 
<div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxTokenClassifierOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L482" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.logits" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, config.num_labels)</code>) &#x2014; Classification scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxTokenClassifierOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of token classification models.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 
20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a 
id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxQuestionAnsweringModelOutput </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" 
opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxQuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L508" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the model at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.attentions" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxQuestionAnsweringModelOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of question answering models.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg 
width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FlaxSeq2SeqQuestionAnsweringModelOutput </span></h2> <div class="docstring"><div><span 
class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_flax_outputs.</span><span class="font-semibold">FlaxSeq2SeqQuestionAnsweringModelOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_flax_outputs.py#L537" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_logits<span class="opacity-60">: ndarray = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">past_key_values<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: 
typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_last_hidden_state<span class="opacity-60">: typing.Optional[jax._src.numpy.lax_numpy.ndarray] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[jax._src.numpy.lax_numpy.ndarray]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.start_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.start_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-start scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.end_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.end_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_logits</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Span-end scores (before SoftMax).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.past_key_values" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.past_key_values"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>past_key_values</strong> (<code>tuple(tuple(jnp.ndarray))</code>, <em>optional</em>, returned when <code>use_cache=True</code> is passed or when <code>config.use_cache=True</code>) &#x2014; Tuple of <code>tuple(jnp.ndarray)</code> of length <code>config.n_layers</code>, with each tuple having 2 tensors of shape <code>(batch_size, num_heads, sequence_length, embed_size_per_head)</code>) and 2 additional tensors of 
shape <code>(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)</code>.</p> <p>Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see <code>past_key_values</code> input) to speed up sequential decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, 
hidden_size)</code>.</p> <p>Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the decoder&#x2019;s cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state" class="header-link block pr-0.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.encoder_last_hidden_state"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_last_hidden_state</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length, hidden_size)</code>, <em>optional</em>) &#x2014; Sequence of hidden-states at the output of the last layer of the encoder of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.</p> <p>Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(jnp.ndarray)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or when <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>jnp.ndarray</code> (one for each layer) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.</p> <p>Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of sequence-to-sequence question answering models.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 
11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> 
<span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <script type="module" data-hydrate="1sid0q2"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1sid0q2"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/output.mdx-bc3fe8ad.js") ], params: {} } }); </script>
437
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/logging.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;logging&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.utils.logging.set_verbosity_error&quot;,&quot;title&quot;:&quot;Base setters&quot;},{&quot;local&quot;:&quot;transformers.utils.logging.get_verbosity&quot;,&quot;title&quot;:&quot;Other functions&quot;}],&quot;title&quot;:&quot;Logging&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/logging.mdx-8b2a9a6f.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="logging" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#logging"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Logging </span></h1> <p>🤗 Transformers has a centralized logging system, so that you can setup the verbosity of the library easily.</p> <p>Currently the default verbosity of the library is <code>WARNING</code>.</p> <p>To change the level of verbosity, just use one of the direct setters. For instance, here is how to change the verbosity to the INFO level.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 
w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> transformers transformers.logging.set_verbosity_info()<!-- HTML_TAG_END --></pre></div> <p>You can also use the environment variable <code>TRANSFORMERS_VERBOSITY</code> to override the default verbosity. You can set it to one of the following: <code>debug</code>, <code>info</code>, <code>warning</code>, <code>error</code>, <code>critical</code>. For example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->TRANSFORMERS_VERBOSITY=error ./myprogram.py<!-- HTML_TAG_END --></pre></div> <p>Additionally, some <code>warnings</code> can be disabled by setting the environment variable 
<code>TRANSFORMERS_NO_ADVISORY_WARNINGS</code> to a true value, like <em>1</em>. This will disable any warning that is logged using <code>logger.warning_advice()</code> For example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->TRANSFORMERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py<!-- HTML_TAG_END --></pre></div> <p>Here is an example of how to use <code>logging</code> in a module:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.utils <span class="hljs-keyword">import</span> logging logging.set_verbosity_info() logger = logging.get_logger(__name__) logger.info(<span class="hljs-string">&quot;INFO&quot;</span>) logger.warning(<span class="hljs-string">&quot;WARN&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Above, a <code>logger</code> instance is created from <code>logging.get_logger(__name__)</code>. If you want to use <code>logging</code> in a script, you shouldn’t pass <code>__name__</code> to <code>logging.get_logger</code>. 
For example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers.utils <span class="hljs-keyword">import</span> logging <span class="hljs-keyword">if</span> __name__ == <span class="hljs-string">&quot;__main__&quot;</span>: logging.set_verbosity_info() <span class="hljs-comment"># leave it empy or use a string</span> logger = logging.get_logger() logger.info(<span class="hljs-string">&quot;INFO&quot;</span>) logger.warning(<span class="hljs-string">&quot;WARN&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>All the methods of this logging module are documented below, the main ones are <a href="/docs/transformers/pr_16143/en/main_classes/logging#transformers.utils.logging.get_verbosity">logging.get_verbosity()</a> to get the current level of verbosity in the logger and <a 
href="/docs/transformers/pr_16143/en/main_classes/logging#transformers.utils.logging.set_verbosity">logging.set_verbosity()</a> to set the verbosity to the level of your choice. In order (from the least verbose to the most verbose), those levels (with their corresponding int values in parenthesis) are:</p> <ul><li><code>transformers.logging.CRITICAL</code> or <code>transformers.logging.FATAL</code> (int value, 50): only report the most critical errors.</li> <li><code>transformers.logging.ERROR</code> (int value, 40): only report errors.</li> <li><code>transformers.logging.WARNING</code> or <code>transformers.logging.WARN</code> (int value, 30): only reports error and warnings. This the default level used by the library.</li> <li><code>transformers.logging.INFO</code> (int value, 20): reports error, warnings and basic information.</li> <li><code>transformers.logging.DEBUG</code> (int value, 10): report all information.</li></ul> <p>By default, <code>tqdm</code> progress bars will be displayed during model download. 
<a href="/docs/transformers/pr_16143/en/main_classes/logging#transformers.utils.logging.disable_progress_bar">logging.disable_progress_bar()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/logging#transformers.utils.logging.enable_progress_bar">logging.enable_progress_bar()</a> can be used to suppress or unsuppress this behavior.</p> <h2 class="relative group"><a id="transformers.utils.logging.set_verbosity_error" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.logging.set_verbosity_error"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Base setters </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.set_verbosity_error"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path 
d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.set_verbosity_error</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.set_verbosity_error" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.set_verbosity_error"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L184" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Set the verbosity to the <code>ERROR</code> level.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.set_verbosity_warning"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.set_verbosity_warning</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.set_verbosity_warning" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.set_verbosity_warning"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L174" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Set the verbosity to the <code>WARNING</code> level.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.set_verbosity_info"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 
to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.set_verbosity_info</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.set_verbosity_info" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.set_verbosity_info"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L169" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Set the verbosity to the <code>INFO</code> level.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.set_verbosity_debug"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 
7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.set_verbosity_debug</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.set_verbosity_debug" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.set_verbosity_debug"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L179" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Set the verbosity to the <code>DEBUG</code> level.</p></div> <h2 class="relative group"><a id="transformers.utils.logging.get_verbosity" class="header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.logging.get_verbosity"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Other functions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.get_verbosity"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 
19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.get_verbosity</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.get_verbosity" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.get_verbosity"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L127" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- 
HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.utils.logging.get_verbosity.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The logging level.</p> <!-- HTML_TAG_END --></p></div></div> <p>Return the current level for the 🤗 Transformers’s root logger as an int.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>🤗 Transformers has following logging levels:</p> <ul><li>50: <code>transformers.logging.CRITICAL</code> or <code>transformers.logging.FATAL</code></li> <li>40: <code>transformers.logging.ERROR</code></li> <li>30: <code>transformers.logging.WARNING</code> or <code>transformers.logging.WARN</code></li> <li>20: <code>transformers.logging.INFO</code></li> <li>10: <code>transformers.logging.DEBUG</code></li></ul></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.set_verbosity"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 
15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.set_verbosity</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.set_verbosity" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.set_verbosity"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L150" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbosity<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.utils.logging.set_verbosity.verbosity" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.utils.logging.set_verbosity.verbosity"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbosity</strong> (<code>int</code>) &#x2014; 
Logging level, e.g., one of:</p> <ul> <li><code>transformers.logging.CRITICAL</code> or <code>transformers.logging.FATAL</code></li> <li><code>transformers.logging.ERROR</code></li> <li><code>transformers.logging.WARNING</code> or <code>transformers.logging.WARN</code></li> <li><code>transformers.logging.INFO</code></li> <li><code>transformers.logging.DEBUG</code></li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Set the verbosity level for the 🤗 Transformers’s root logger.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.get_logger"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.get_logger</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.get_logger" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.get_logger"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L113" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Return a logger with the specified name.</p> <p>This function is not supposed to be directly accessed unless you are writing a custom transformers module.</p></div> <div class="docstring"><div><span 
class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.enable_default_handler"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.enable_default_handler</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.enable_default_handler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.enable_default_handler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L198" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Enable the default handler of the HuggingFace Transformers’s root logger.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.disable_default_handler"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" 
d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.disable_default_handler</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.disable_default_handler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.disable_default_handler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L189" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs 
md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Disable the default handler of the HuggingFace Transformers’s root logger.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.enable_explicit_format"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.enable_explicit_format</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.enable_explicit_format" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.utils.logging.enable_explicit_format"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L244" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Enable explicit formatting for every HuggingFace Transformers’s logger. 
The explicit formatter is as follows:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --> [LEVELNAME|<span class="hljs-type">FILENAME</span>|<span class="hljs-type">LINE</span> NUMBER] TIME &gt;&gt; MESSAGE<!-- HTML_TAG_END --></pre></div> <p>All handlers currently bound to the root logger are affected by this method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.reset_format"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.reset_format</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.reset_format" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.reset_format"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L259" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Resets the formatting for HuggingFace Transformers’s loggers.</p> <p>All handlers currently bound to the root logger are affected by this method.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.enable_progress_bar"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 
7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.enable_progress_bar</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.enable_progress_bar" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.enable_progress_bar"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L335" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Enable tqdm progress bar.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.utils.logging.disable_progress_bar"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 
rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.utils.logging.disable_progress_bar</span></h4><!-- HTML_TAG_END --> <a id="transformers.utils.logging.disable_progress_bar" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.utils.logging.disable_progress_bar"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/utils/logging.py#L341" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Enable tqdm progress bar.</p></div> <script type="module" data-hydrate="1w7dvnw"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1w7dvnw"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/logging.mdx-8b2a9a6f.js") ], params: {} } }); </script>
438
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/main_classes/onnx.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;exporting-transformers-models-to-onnx&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;onnx-configurations&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.onnx.OnnxConfig&quot;,&quot;title&quot;:&quot;OnnxConfig&quot;},{&quot;local&quot;:&quot;transformers.onnx.OnnxConfigWithPast&quot;,&quot;title&quot;:&quot;OnnxConfigWithPast&quot;},{&quot;local&quot;:&quot;transformers.onnx.OnnxSeq2SeqConfigWithPast&quot;,&quot;title&quot;:&quot;OnnxSeq2SeqConfigWithPast&quot;}],&quot;title&quot;:&quot;ONNX Configurations&quot;},{&quot;local&quot;:&quot;onnx-features&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.onnx.FeaturesManager&quot;,&quot;title&quot;:&quot;FeaturesManager&quot;}],&quot;title&quot;:&quot;ONNX Features&quot;}],&quot;title&quot;:&quot;Exporting 🤗 Transformers models to ONNX&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/main_classes/onnx.mdx-da087fbf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <h1 class="relative group"><a id="exporting-transformers-models-to-onnx" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#exporting-transformers-models-to-onnx"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Exporting 🤗 Transformers models to ONNX </span></h1> <p>🤗 Transformers provides a <code>transformers.onnx</code> package that enables you to convert model checkpoints to an ONNX graph by leveraging configuration objects.</p> <p>See the <a href="../serialization">guide</a> on exporting 🤗 Transformers models for more details.</p> <h2 class="relative group"><a id="onnx-configurations" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#onnx-configurations"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ONNX Configurations </span></h2> <p>We provide three abstract classes that you should inherit from, depending on the type of model architecture you wish to export:</p> <ul><li>Encoder-based models inherit from <a href="/docs/transformers/pr_16143/en/main_classes/onnx#transformers.onnx.OnnxConfig">OnnxConfig</a></li> <li>Decoder-based models inherit from <a href="/docs/transformers/pr_16143/en/main_classes/onnx#transformers.onnx.OnnxConfigWithPast">OnnxConfigWithPast</a></li> <li>Encoder-decoder models inherit from <a href="/docs/transformers/pr_16143/en/main_classes/onnx#transformers.onnx.OnnxSeq2SeqConfigWithPast">OnnxSeq2SeqConfigWithPast</a></li></ul> <h3 class="relative group"><a id="transformers.onnx.OnnxConfig" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>OnnxConfig 
</span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfig"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.onnx.</span><span class="font-semibold">OnnxConfig</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfig" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfig"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/config.py#L68" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;default&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">patching_specs<span class="opacity-60">: typing.List[transformers.onnx.config.PatchingSpec] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfig.flatten_output_collection_property"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" 
viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>flatten_output_collection_property</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfig.flatten_output_collection_property" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfig.flatten_output_collection_property"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/config.py#L320" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60">: str</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">field<span class="opacity-60">: typing.Iterable[typing.Any]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>(Dict[str, Any])</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.onnx.OnnxConfig.flatten_output_collection_property.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>(Dict[str, Any])</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Outputs with flattened structure and key mapping this new structure.</p> <!-- HTML_TAG_END --></p></div></div> <p>Flatten any potential nested structure expanding the name of the field with the index of the element within the structure.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.onnx.OnnxConfig.from_model_config"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_model_config</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfig.from_model_config" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfig.from_model_config"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/config.py#L109" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;default&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Instantiate a OnnxConfig for a specific model</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfig.generate_dummy_inputs"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 
15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>generate_dummy_inputs</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfig.generate_dummy_inputs" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/config.py#L239" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">preprocessor<span class="opacity-60">: typing.Union[ForwardRef(&#39;PreTrainedTokenizerBase&#39;), ForwardRef(&#39;FeatureExtractionMixin&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_size<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seq_length<span class="opacity-60">: int = -1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_pair<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: typing.Optional[transformers.file_utils.TensorType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_channels<span class="opacity-60">: int = 3</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_width<span class="opacity-60">: int = 40</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">image_height<span 
class="opacity-60">: int = 40</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60">: PreTrainedTokenizerBase = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The batch size to export the model for (-1 means dynamic axis).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.seq_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.seq_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>seq_length</strong> (<code>int</code>, <em>optional</em>, defaults to -1) &#x2014; The sequence length to export the model for (-1 means dynamic axis).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.is_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.is_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 
67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_pair</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Indicate if the input is a pair (sentence 1, sentence 2)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>TensorType</code>, <em>optional</em>, 
defaults to <code>None</code>) &#x2014; The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.num_channels" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.num_channels"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_channels</strong> (<code>int</code>, <em>optional</em>, defaults to 3) &#x2014; The number of channels of the generated images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.image_width" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.image_width"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_width</strong> (<code>int</code>, <em>optional</em>, defaults to 40) &#x2014; The width of the generated images.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.OnnxConfig.generate_dummy_inputs.image_height" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfig.generate_dummy_inputs.image_height"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>image_height</strong> (<code>int</code>, <em>optional</em>, defaults to 40) &#x2014; The height of the generated images.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Generate inputs to provide to the ONNX exporter for the specific framework</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfig.use_external_data_format"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>use_external_data_format</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfig.use_external_data_format" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfig.use_external_data_format"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/config.py#L213" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_parameters<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Flag indicating if the model requires using external data format</p></div></div> <h3 class="relative group"><a id="transformers.onnx.OnnxConfigWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxConfigWithPast"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>OnnxConfigWithPast </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfigWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 
8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.onnx.</span><span class="font-semibold">OnnxConfigWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfigWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfigWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/config.py#L339" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = 
&#39;default&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">patching_specs<span class="opacity-60">: typing.List[transformers.onnx.config.PatchingSpec] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_past<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfigWithPast.fill_with_past_key_values_"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 
12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>fill_with_past_key_values_</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfigWithPast.fill_with_past_key_values_" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfigWithPast.fill_with_past_key_values_"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/config.py#L441" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs_or_outputs<span class="opacity-60">: typing.Mapping[str, typing.Mapping[int, str]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">direction<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> 
<div class="!mb-10 relative docstring-details "> </div></div> <p>Fill the input_or_ouputs mapping with past_key_values dynamic axes considering.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxConfigWithPast.with_past"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>with_past</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxConfigWithPast.with_past" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxConfigWithPast.with_past"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/config.py#L350" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;default&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Instantiate a OnnxConfig with <code>use_past</code> attribute set to True</p></div></div> <h3 class="relative group"><a id="transformers.onnx.OnnxSeq2SeqConfigWithPast" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.OnnxSeq2SeqConfigWithPast"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>OnnxSeq2SeqConfigWithPast </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.OnnxSeq2SeqConfigWithPast"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.onnx.</span><span class="font-semibold">OnnxSeq2SeqConfigWithPast</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.onnx.OnnxSeq2SeqConfigWithPast" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.OnnxSeq2SeqConfigWithPast"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/config.py#L474" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str = &#39;default&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">patching_specs<span class="opacity-60">: typing.List[transformers.onnx.config.PatchingSpec] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_past<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <h2 class="relative group"><a id="onnx-features" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#onnx-features"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>ONNX Features </span></h2> <p>Each ONNX configuration is associated with a set of <em>features</em> that enable you to export models for different types of topologies or tasks.</p> <h3 class="relative group"><a id="transformers.onnx.FeaturesManager" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.FeaturesManager"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>FeaturesManager </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.FeaturesManager"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.onnx.</span><span class="font-semibold">FeaturesManager</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.onnx.FeaturesManager" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.FeaturesManager"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/features.py#L84" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.FeaturesManager.check_supported_model_or_raise"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 
font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>check_supported_model_or_raise</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.FeaturesManager.check_supported_model_or_raise" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.FeaturesManager.check_supported_model_or_raise"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/features.py#L355" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: typing.Union[transformers.modeling_utils.PreTrainedModel, transformers.modeling_tf_utils.TFPreTrainedModel]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature<span class="opacity-60">: str = &#39;default&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Check whether or not the model has the requested features.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.FeaturesManager.get_model_class_for_feature"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 
27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_model_class_for_feature</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.FeaturesManager.get_model_class_for_feature" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.FeaturesManager.get_model_class_for_feature"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/features.py#L300" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.FeaturesManager.get_model_class_for_feature.feature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.FeaturesManager.get_model_class_for_feature.feature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feature</strong> (<code>str</code>) &#x2014; The feature required.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.FeaturesManager.get_model_class_for_feature.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.FeaturesManager.get_model_class_for_feature.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pt&quot;</code>) &#x2014; The framework to use for the export.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Attempts to retrieve an AutoModel class from a feature name.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.FeaturesManager.get_model_from_feature"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 
break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_model_from_feature</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.FeaturesManager.get_model_from_feature" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.FeaturesManager.get_model_from_feature"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/features.py#L327" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">feature<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">framework<span class="opacity-60">: str = &#39;pt&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.FeaturesManager.get_model_from_feature.feature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.FeaturesManager.get_model_from_feature.feature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>feature</strong> (<code>str</code>) &#x2014; The feature required.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.FeaturesManager.get_model_from_feature.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.FeaturesManager.get_model_from_feature.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>str</code>) &#x2014; The name of the model to export.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.FeaturesManager.get_model_from_feature.framework" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.FeaturesManager.get_model_from_feature.framework"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>framework</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pt&quot;</code>) &#x2014; The framework to use for the export.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Attempts to retrieve a model from a model’s name and the feature to be enabled.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.onnx.FeaturesManager.get_supported_features_for_model_type"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 
rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_supported_features_for_model_type</span></h4><!-- HTML_TAG_END --> <a id="transformers.onnx.FeaturesManager.get_supported_features_for_model_type" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.onnx.FeaturesManager.get_supported_features_for_model_type"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/onnx/features.py#L255" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_type<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.FeaturesManager.get_supported_features_for_model_type.model_type" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.FeaturesManager.get_supported_features_for_model_type.model_type"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_type</strong> (<code>str</code>) &#x2014; The model type to retrieve the supported features for.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.onnx.FeaturesManager.get_supported_features_for_model_type.model_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.onnx.FeaturesManager.get_supported_features_for_model_type.model_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; The name attribute of the model object, only used for the exception message.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Tries to retrieve the feature -&gt; OnnxConfig constructor map from the model type.</p></div></div> <script type="module" data-hydrate="j9serd"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="j9serd"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/main_classes/onnx.mdx-da087fbf.js") ], params: {} } }); </script>
439
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/tasks/language_modeling.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;language-modeling&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-eli5-dataset&quot;,&quot;title&quot;:&quot;Load ELI5 dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;causal-language-modeling&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;finetune-with-trainer&quot;,&quot;title&quot;:&quot;Fine-tune with Trainer&quot;},{&quot;local&quot;:&quot;finetune-with-tensorflow&quot;,&quot;title&quot;:&quot;Fine-tune with TensorFlow&quot;}],&quot;title&quot;:&quot;Causal language modeling&quot;},{&quot;local&quot;:&quot;masked-language-modeling&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;finetune-with-trainer&quot;,&quot;title&quot;:&quot;Fine-tune with Trainer&quot;},{&quot;local&quot;:&quot;finetune-with-tensorflow&quot;,&quot;title&quot;:&quot;Fine-tune with TensorFlow&quot;}],&quot;title&quot;:&quot;Masked language modeling&quot;}],&quot;title&quot;:&quot;Language modeling&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/tasks/language_modeling.mdx-e8a233ab.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" 
href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <h1 class="relative group"><a id="language-modeling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#language-modeling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Language modeling </span></h1> <p>Language modeling predicts words in a sentence. 
There are two forms of language modeling.</p> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/Vpjb1lu0MDk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Causal language modeling predicts the next token in a sequence of tokens, and the model can only attend to tokens on the left.</p> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/mqElG5QJWUg" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Masked language modeling predicts a masked token in a sequence, and the model can attend to tokens bidirectionally.</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/distilgpt2" rel="nofollow">DistilGPT2</a> for causal language modeling and <a href="https://huggingface.co/distilroberta-base" rel="nofollow">DistilRoBERTa</a> for masked language modeling on the <a href="https://www.reddit.com/r/askscience/" rel="nofollow">r/askscience</a> subset of the <a href="https://huggingface.co/datasets/eli5" rel="nofollow">ELI5</a> dataset.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>You can fine-tune other architectures for language modeling such as <a href="https://huggingface.co/EleutherAI/gpt-neo-125M" rel="nofollow">GPT-Neo</a>, <a href="https://huggingface.co/EleutherAI/gpt-j-6B" rel="nofollow">GPT-J</a>, and <a href="https://huggingface.co/bert-base-uncased" rel="nofollow">BERT</a>, following the same steps presented in this guide!</p> <p>See the text generation <a href="https://huggingface.co/tasks/text-generation" rel="nofollow">task page</a> and fill 
mask <a href="https://huggingface.co/tasks/fill-mask" rel="nofollow">task page</a> for more information about their associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-eli5-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-eli5-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load ELI5 dataset </span></h2> <p>Load only the first 5000 rows of the ELI5 dataset from the 🤗 Datasets library since it is pretty large:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>eli5 = load_dataset(<span class="hljs-string">&quot;eli5&quot;</span>, split=<span class="hljs-string">&quot;train_asks[:5000]&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Split this dataset into a train and test set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black 
border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->eli5 = eli5.train_test_split(test_size=<span class="hljs-number">0.2</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>eli5[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;answers&#x27;</span>: {<span class="hljs-string">&#x27;a_id&#x27;</span>: [<span class="hljs-string">&#x27;c3d1aib&#x27;</span>, <span class="hljs-string">&#x27;c3d4lya&#x27;</span>], <span class="hljs-string">&#x27;score&#x27;</span>: [<span class="hljs-number">6</span>, <span class="hljs-number">3</span>], <span 
class="hljs-string">&#x27;text&#x27;</span>: [<span class="hljs-string">&quot;The velocity needed to remain in orbit is equal to the square root of Newton&#x27;s constant times the mass of earth divided by the distance from the center of the earth. I don&#x27;t know the altitude of that specific mission, but they&#x27;re usually around 300 km. That means he&#x27;s going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.&quot;</span>, <span class="hljs-string">&quot;Hope you don&#x27;t mind me asking another question, but why aren&#x27;t there any stars visible in this photo?&quot;</span>]}, <span class="hljs-string">&#x27;answers_urls&#x27;</span>: {<span class="hljs-string">&#x27;url&#x27;</span>: []}, <span class="hljs-string">&#x27;document&#x27;</span>: <span class="hljs-string">&#x27;&#x27;</span>, <span class="hljs-string">&#x27;q_id&#x27;</span>: <span class="hljs-string">&#x27;nyxfp&#x27;</span>, <span class="hljs-string">&#x27;selftext&#x27;</span>: <span class="hljs-string">&#x27;_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? 
And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?&#x27;</span>, <span class="hljs-string">&#x27;selftext_urls&#x27;</span>: {<span class="hljs-string">&#x27;url&#x27;</span>: [<span class="hljs-string">&#x27;http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg&#x27;</span>]}, <span class="hljs-string">&#x27;subreddit&#x27;</span>: <span class="hljs-string">&#x27;askscience&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;Few questions about this space walk photograph.&#x27;</span>, <span class="hljs-string">&#x27;title_urls&#x27;</span>: {<span class="hljs-string">&#x27;url&#x27;</span>: []}}<!-- HTML_TAG_END --></pre></div> <p>Notice <code>text</code> is a subfield nested inside the <code>answers</code> dictionary. When you preprocess the dataset, you will need to extract the <code>text</code> subfield into a separate column.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> 
<iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/ma1TrR7gE7I" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>For causal language modeling, load the DistilGPT2 tokenizer to process the <code>text</code> subfield:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)<!-- HTML_TAG_END --></pre></div> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/8PmhEIXhBvI" 
title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>For masked language modeling, load the DistilRoBERTa tokenizer instead:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilroberta-base&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Extract the <code>text</code> subfield from its nested structure with the <a href="https://huggingface.co/docs/datasets/process.html#flatten" rel="nofollow"><code>flatten</code></a> method:</p> <div class="code-block 
relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>eli5 = eli5.flatten() <span class="hljs-meta">&gt;&gt;&gt; </span>eli5[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;answers.a_id&#x27;</span>: [<span class="hljs-string">&#x27;c3d1aib&#x27;</span>, <span class="hljs-string">&#x27;c3d4lya&#x27;</span>], <span class="hljs-string">&#x27;answers.score&#x27;</span>: [<span class="hljs-number">6</span>, <span class="hljs-number">3</span>], <span class="hljs-string">&#x27;answers.text&#x27;</span>: [<span class="hljs-string">&quot;The velocity needed to remain in orbit is equal to the square root of Newton&#x27;s constant times the mass of earth divided by the distance from the center of the earth. 
I don&#x27;t know the altitude of that specific mission, but they&#x27;re usually around 300 km. That means he&#x27;s going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.&quot;</span>, <span class="hljs-string">&quot;Hope you don&#x27;t mind me asking another question, but why aren&#x27;t there any stars visible in this photo?&quot;</span>], <span class="hljs-string">&#x27;answers_urls.url&#x27;</span>: [], <span class="hljs-string">&#x27;document&#x27;</span>: <span class="hljs-string">&#x27;&#x27;</span>, <span class="hljs-string">&#x27;q_id&#x27;</span>: <span class="hljs-string">&#x27;nyxfp&#x27;</span>, <span class="hljs-string">&#x27;selftext&#x27;</span>: <span class="hljs-string">&#x27;_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?&#x27;</span>, <span class="hljs-string">&#x27;selftext_urls.url&#x27;</span>: [<span class="hljs-string">&#x27;http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg&#x27;</span>], <span class="hljs-string">&#x27;subreddit&#x27;</span>: <span class="hljs-string">&#x27;askscience&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;Few questions about this space walk photograph.&#x27;</span>, <span class="hljs-string">&#x27;title_urls.url&#x27;</span>: []}<!-- HTML_TAG_END --></pre></div> <p>Each subfield is now a separate column as indicated by the <code>answers</code> prefix. 
Notice that <code>answers.text</code> is a list. Instead of tokenizing each sentence separately, convert the list to a string to jointly tokenize them.</p> <p>Here is how you can create a preprocessing function to convert the list to a string and truncate sequences to be no longer than DistilGPT2’s maximum input length:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> tokenizer([<span class="hljs-string">&quot; &quot;</span>.join(x) <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;answers.text&quot;</span>]], truncation=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map" rel="nofollow"><code>map</code></a> function to apply the preprocessing function over the entire dataset. You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once and increasing the number of processes with <code>num_proc</code>. Remove the columns you don’t need:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- 
HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_eli5 = eli5.<span class="hljs-built_in">map</span>( <span class="hljs-meta">... </span> preprocess_function, <span class="hljs-meta">... </span> batched=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> num_proc=<span class="hljs-number">4</span>, <span class="hljs-meta">... </span> remove_columns=eli5[<span class="hljs-string">&quot;train&quot;</span>].column_names, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Now you need a second preprocessing function to capture text truncated from any lengthy examples to prevent loss of information. This preprocessing function should:</p> <ul><li>Concatenate all the text.</li> <li>Split the concatenated text into smaller chunks defined by <code>block_size</code>.</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> 
Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>block_size = <span class="hljs-number">128</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">group_texts</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> concatenated_examples = {k: <span class="hljs-built_in">sum</span>(examples[k], []) <span class="hljs-keyword">for</span> k <span class="hljs-keyword">in</span> examples.keys()} <span class="hljs-meta">... </span> total_length = <span class="hljs-built_in">len</span>(concatenated_examples[<span class="hljs-built_in">list</span>(examples.keys())[<span class="hljs-number">0</span>]]) <span class="hljs-meta">... </span> result = { <span class="hljs-meta">... </span> k: [t[i : i + block_size] <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">0</span>, total_length, block_size)] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> k, t <span class="hljs-keyword">in</span> concatenated_examples.items() <span class="hljs-meta">... </span> } <span class="hljs-meta">... </span> result[<span class="hljs-string">&quot;labels&quot;</span>] = result[<span class="hljs-string">&quot;input_ids&quot;</span>].copy() <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> result<!-- HTML_TAG_END --></pre></div> <p>Apply the <code>group_texts</code> function over the entire dataset:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>lm_dataset = tokenized_eli5.<span class="hljs-built_in">map</span>(group_texts, batched=<span class="hljs-literal">True</span>, num_proc=<span class="hljs-number">4</span>)<!-- HTML_TAG_END --></pre></div> <p>For causal language modeling, use <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling">DataCollatorForLanguageModeling</a> to create a batch of examples. It will also <em>dynamically pad</em> your text to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient. </p> <p>You can use the end of sequence token as the padding token, and set <code>mlm=False</code>. This will use the inputs as labels shifted to the right by one element:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" 
width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.pad_token = tokenizer.eos_token <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=<span class="hljs-literal">False</span>)<!-- HTML_TAG_END --></pre></div> <p>For masked language modeling, use the same <a 
href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling">DataCollatorForLanguageModeling</a> except you should specify <code>mlm_probability</code> to randomly mask tokens each time you iterate over the data.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g 
clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForLanguageModeling <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.pad_token = tokenizer.eos_token <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=<span class="hljs-number">0.15</span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="causal-language-modeling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#causal-language-modeling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Causal language modeling </span></h2> <p>Causal language modeling is frequently used for text generation. 
This section shows you how to fine-tune <a href="https://huggingface.co/distilgpt2" rel="nofollow">DistilGPT2</a> to generate new text.</p> <h3 class="relative group"><a id="finetune-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with Trainer </span></h3> <p>Load DistilGPT2 with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForCausalLM">AutoModelForCausalLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path 
d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCausalLM, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, datasets, and data collator.</li> <li>Call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block 
relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... 
</span> eval_dataset=lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="finetune-with-tensorflow" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-tensorflow"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with TensorFlow </span></h3> <p>To fine-tune a model in TensorFlow is just as easy, with only a few differences.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Convert your datasets to the <code>tf.data.Dataset</code> format with <a 
href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset" rel="nofollow"><code>to_tf_dataset</code></a>. Specify inputs and labels in <code>columns</code>, whether to shuffle the dataset order, batch size, and the data collator:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = lm_dataset[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... 
</span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = lm_dataset[<span class="hljs-string">&quot;test&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Set up an optimizer function, learning rate, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 
translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)<!-- HTML_TAG_END --></pre></div> <p>Load DistilGPT2 with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForCausalLM">TFAutoModelForCausalLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- 
HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> 
<p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="masked-language-modeling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#masked-language-modeling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Masked language modeling </span></h2> <p>Masked language modeling is also known as a fill-mask task because it predicts a masked token in a sequence. Models for masked language modeling require a good contextual understanding of an entire sequence instead of only the left context. This section shows you how to fine-tune <a href="https://huggingface.co/distilroberta-base" rel="nofollow">DistilRoBERTa</a> to predict a masked word.</p> <h3 class="relative group"><a id="finetune-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>Fine-tune with Trainer </span></h3> <p>Load DistilRoBERTa with <code>AutoModelForMaskedlM</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForMaskedLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMaskedLM.from_pretrained(<span class="hljs-string">&quot;distilroberta-base&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a 
href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, datasets, and data collator.</li> <li>Call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = 
TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=lm_dataset[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=lm_dataset[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="finetune-with-tensorflow" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-tensorflow"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with TensorFlow </span></h3> <p>To fine-tune a model in TensorFlow is just as easy, with only a few differences.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Convert your datasets to the <code>tf.data.Dataset</code> format with <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset" rel="nofollow"><code>to_tf_dataset</code></a>. 
Specify inputs and labels in <code>columns</code>, whether to shuffle the dataset order, batch size, and the data collator:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = lm_dataset[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... 
</span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = lm_dataset[<span class="hljs-string">&quot;test&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Set up an optimizer function, learning rate, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; 
border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)<!-- HTML_TAG_END --></pre></div> <p>Load DistilRoBERTa with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForMaskedLM">TFAutoModelForMaskedLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForMaskedLM <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilroberta-base&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block 
relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for causal language modeling, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling.ipynb" rel="nofollow">PyTorch notebook</a> or <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/language_modeling-tf.ipynb" 
rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="18rmqp4"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="18rmqp4"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/tasks/language_modeling.mdx-e8a233ab.js") ], params: {} } }); </script>
440
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/tasks/sequence_classification.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;text-classification&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-imdb-dataset&quot;,&quot;title&quot;:&quot;Load IMDb dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;finetune-with-trainer&quot;,&quot;title&quot;:&quot;Fine-tune with Trainer&quot;},{&quot;local&quot;:&quot;finetune-with-tensorflow&quot;,&quot;title&quot;:&quot;Fine-tune with TensorFlow&quot;}],&quot;title&quot;:&quot;Text classification&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/tasks/sequence_classification.mdx-d07399b9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <h1 class="relative group"><a id="text-classification" class="header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#text-classification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Text classification </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/leNG9fN9FQU" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Text classification is a common NLP task that assigns a label or class to text. There are many practical applications of text classification widely used in production by some of today’s largest companies. One of the most popular forms of text classification is sentiment analysis, which assigns a label like positive, negative, or neutral to a sequence of text. 
</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/distilbert-base-uncased" rel="nofollow">DistilBERT</a> on the <a href="https://huggingface.co/datasets/imdb" rel="nofollow">IMDb</a> dataset to determine whether a movie review is positive or negative.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the text classification <a href="https://huggingface.co/tasks/text-classification" rel="nofollow">task page</a> for more information about other forms of text classification and their associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-imdb-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-imdb-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load IMDb dataset </span></h2> <p>Load the IMDb dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative 
text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>imdb = load_dataset(<span class="hljs-string">&quot;imdb&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>imdb[<span class="hljs-string">&quot;test&quot;</span>][<span class="hljs-number">0</span>] { <span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;text&quot;</span>: <span class="hljs-string">&quot;I love sci-fi and am willing to put up with a lot. Sci-fi movies/TV are usually underfunded, under-appreciated and misunderstood. I tried to like this, I really did, but it is to good TV sci-fi as Babylon 5 is to Star Trek (the original). Silly prosthetics, cheap cardboard sets, stilted dialogues, CG that doesn&#x27;t match the background, and painfully one-dimensional characters cannot be overcome with a &#x27;sci-fi&#x27; setting. (I&#x27;m sure there are those of you out there who think Babylon 5 is good sci-fi TV. It&#x27;s not. It&#x27;s clichéd and uninspiring.) While US viewers might like emotion and character development, sci-fi is a genre that does not take itself seriously (cf. Star Trek). It may treat important issues, yet not as a serious philosophy. It&#x27;s really difficult to care about the characters here as they are not simply foolish, just missing a spark of life. Their actions and reactions are wooden and predictable, often painful to watch. 
The makers of Earth KNOW it&#x27;s rubbish as they have to always say \&quot;Gene Roddenberry&#x27;s Earth...\&quot; otherwise people would not continue watching. Roddenberry&#x27;s ashes must be turning in their orbit as this dull, cheap, poorly edited (watching it without advert breaks really brings this home) trudging Trabant of a show lumbers into space. Spoiler. So, kill off a main character. And then bring him back as another actor. Jeeez! Dallas all over again.&quot;</span>, }<!-- HTML_TAG_END --></pre></div> <p>There are two fields in this dataset: </p> <ul><li><code>text</code>: a string containing the text of the movie review.</li> <li><code>label</code>: a value that can either be <code>0</code> for a negative review or <code>1</code> for a positive review.</li></ul> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Load the DistilBERT tokenizer to process the <code>text</code> field:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex 
items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Create a preprocessing function to tokenize <code>text</code> and truncate sequences to be no longer than DistilBERT’s maximum input length:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> tokenizer(examples[<span class="hljs-string">&quot;text&quot;</span>], truncation=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map" rel="nofollow"><code>map</code></a> function to apply the preprocessing function over the entire dataset. 
You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->tokenized_imdb = imdb.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorWithPadding">DataCollatorWithPadding</a> to create a batch of examples. It will also <em>dynamically pad</em> your text to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path 
d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorWithPadding(tokenizer=tokenizer)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with Trainer </span></h2> <p>Load DistilBERT with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSequenceClassification">AutoModelForSequenceClassification</a> along with the number of expected labels:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 
border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, dataset, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">5</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... 
</span> eval_dataset=tokenized_imdb[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p><a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> will apply dynamic padding by default when you pass <code>tokenizer</code> to it. In this case, you don’t need to specify a data collator explicitly.</p></div> <h2 class="relative group"><a id="finetune-with-tensorflow" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-tensorflow"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with TensorFlow </span></h2> <p>To fine-tune a model in TensorFlow is just as easy, with only a few differences.</p> 
<div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Convert your datasets to the <code>tf.data.Dataset</code> format with <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset" rel="nofollow"><code>to_tf_dataset</code></a>. Specify inputs and labels in <code>columns</code>, whether to shuffle the dataset order, batch size, and the data collator:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; 
</span>tf_train_dataset = tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_dataset = tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Set up an optimizer function, learning rate schedule, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">5</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batches_per_epoch = <span class="hljs-built_in">len</span>(tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size 
<span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = <span class="hljs-built_in">int</span>(batches_per_epoch * num_epochs) <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer(init_lr=<span class="hljs-number">2e-5</span>, num_warmup_steps=<span class="hljs-number">0</span>, num_train_steps=total_train_steps)<!-- HTML_TAG_END --></pre></div> <p>Load DistilBERT with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForSequenceClassification">TFAutoModelForSequenceClassification</a> along with the number of expected labels:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" 
rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for text classification, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb" rel="nofollow">PyTorch notebook</a> or <a 
href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb" rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="v72pzj"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="v72pzj"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/tasks/sequence_classification.mdx-d07399b9.js") ], params: {} } }); </script>
441
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/tasks/translation.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;translation&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-opus-books-dataset&quot;,&quot;title&quot;:&quot;Load OPUS Books dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;finetune-with-trainer&quot;,&quot;title&quot;:&quot;Fine-tune with Trainer&quot;},{&quot;local&quot;:&quot;finetune-with-tensorflow&quot;,&quot;title&quot;:&quot;Fine-tune with TensorFlow&quot;}],&quot;title&quot;:&quot;Translation&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/tasks/translation.mdx-9674fec0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <h1 class="relative group"><a id="translation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#translation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Translation </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/1JvfrvZgi6c" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Translation converts a sequence of text from one language to another. It is one of several tasks you can formulate as a sequence-to-sequence problem, a powerful framework that extends to vision and audio tasks. 
</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/t5-small" rel="nofollow">T5</a> on the English-French subset of the <a href="https://huggingface.co/datasets/opus_books" rel="nofollow">OPUS Books</a> dataset to translate English text to French.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the translation <a href="https://huggingface.co/tasks/translation" rel="nofollow">task page</a> for more information about its associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-opus-books-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-opus-books-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load OPUS Books dataset </span></h2> <p>Load the OPUS Books dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 
cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>books = load_dataset(<span class="hljs-string">&quot;opus_books&quot;</span>, <span class="hljs-string">&quot;en-fr&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Split this dataset into a train and test set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->books = books[<span class="hljs-string">&quot;train&quot;</span>].train_test_split(test_size=<span class="hljs-number">0.2</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: 
transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>books[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;90560&#x27;</span>, <span class="hljs-string">&#x27;translation&#x27;</span>: {<span class="hljs-string">&#x27;en&#x27;</span>: <span class="hljs-string">&#x27;But this lofty plateau measured only a few fathoms, and soon we reentered Our Element.&#x27;</span>, <span class="hljs-string">&#x27;fr&#x27;</span>: <span class="hljs-string">&#x27;Mais ce plateau élevé ne mesurait que quelques toises, et bientôt nous fûmes rentrés dans notre élément.&#x27;</span>}}<!-- HTML_TAG_END --></pre></div> <p>The <code>translation</code> field is a dictionary containing the English and French translations of the text.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <iframe 
class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/XAR8jnZZuUs" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Load the T5 tokenizer to process the language pairs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The preprocessing function needs to:</p> <ol><li>Prefix the input with a prompt so T5 knows this is a translation task. 
Some models capable of multiple NLP tasks require prompting for specific tasks.</li> <li>Tokenize the input (English) and target (French) separately. You can’t tokenize French text with a tokenizer pretrained on an English vocabulary. A context manager will help set the tokenizer to French first before tokenizing it.</li> <li>Truncate sequences to be no longer than the maximum length set by the <code>max_length</code> parameter.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>source_lang = <span class="hljs-string">&quot;en&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>target_lang = <span class="hljs-string">&quot;fr&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>prefix = <span class="hljs-string">&quot;translate English to French: &quot;</span> <span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> inputs = [prefix + example[source_lang] <span class="hljs-keyword">for</span> example <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;translation&quot;</span>]] <span class="hljs-meta">... </span> targets = [example[target_lang] <span class="hljs-keyword">for</span> example <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;translation&quot;</span>]] <span class="hljs-meta">... </span> model_inputs = tokenizer(inputs, max_length=<span class="hljs-number">128</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> labels = tokenizer(targets, max_length=<span class="hljs-number">128</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> model_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> model_inputs<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map" rel="nofollow"><code>map</code></a> function to apply the preprocessing function over the entire dataset. 
You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_books = books.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorForSeq2Seq">DataCollatorForSeq2Seq</a> to create a batch of examples. It will also <em>dynamically pad</em> your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path 
d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-trainer"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with Trainer </span></h2> <p>Load T5 with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSeq2SeqLM">AutoModelForSeq2SeqLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Seq2SeqTrainingArguments">Seq2SeqTrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Seq2SeqTrainer">Seq2SeqTrainer</a> along with the model, dataset, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = Seq2SeqTrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Seq2SeqTrainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... 
</span> train_dataset=tokenized_books[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_books[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-tensorflow" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-tensorflow"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with TensorFlow </span></h2> <p>To fine-tune a model in TensorFlow is just as easy, with only a few differences.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a 
href="training#finetune-with-keras">here</a>!</p></div> <p>Convert your datasets to the <code>tf.data.Dataset</code> format with <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset" rel="nofollow"><code>to_tf_dataset</code></a>. Specify inputs and labels in <code>columns</code>, whether to shuffle the dataset order, batch size, and the data collator:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_books[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... 
</span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = tokenized_books[<span class="hljs-string">&quot;test&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Set up an optimizer function, learning rate schedule, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)<!-- HTML_TAG_END --></pre></div> <p>Load T5 with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForSeq2SeqLM">TFAutoModelForSeq2SeqLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 
opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for translation, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/translation.ipynb" rel="nofollow">PyTorch notebook</a> or <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/translation-tf.ipynb" rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="1fpusv"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1fpusv"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/tasks/translation.mdx-9674fec0.js") ], params: {} } }); </script>
442
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/tasks/audio_classification.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;audio-classification&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-superb-dataset&quot;,&quot;title&quot;:&quot;Load SUPERB dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;finetune-with-trainer&quot;,&quot;title&quot;:&quot;Fine-tune with Trainer&quot;}],&quot;title&quot;:&quot;Audio classification&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/tasks/audio_classification.mdx-cab2e6ac.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="audio-classification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#audio-classification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Audio classification </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/KWwzcmG98Ds" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Audio classification assigns a label or class to audio data. It is similar to text classification, except an audio input is continuous and must be discretized, whereas text can be split into tokens. 
Some practical applications of audio classification include identifying intent, speakers, and even animal species by their sounds.</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/facebook/wav2vec2-base" rel="nofollow">Wav2Vec2</a> on the Keyword Spotting subset of the <a href="https://huggingface.co/datasets/superb" rel="nofollow">SUPERB</a> benchmark to classify utterances.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the audio classification <a href="https://huggingface.co/tasks/audio-classification" rel="nofollow">task page</a> for more information about its associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-superb-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-superb-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load SUPERB dataset </span></h2> <p>Load the SUPERB dataset from the 🤗 Datasets library:</p> <div class="code-block 
relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>ks = load_dataset(<span class="hljs-string">&quot;superb&quot;</span>, <span class="hljs-string">&quot;ks&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>ks[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([ <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , <span class="hljs-number">0.</span> , ..., -<span class="hljs-number">0.00592041</span>, -<span class="hljs-number">0.00405884</span>, -<span class="hljs-number">0.00253296</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/05734a36d88019a09725c20cc024e1c4e7982e37d7d55c0c1ca1742ea1cdd47f/_background_noise_/doing_the_dishes.wav&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">16000</span>}, <span class="hljs-string">&#x27;file&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/05734a36d88019a09725c20cc024e1c4e7982e37d7d55c0c1ca1742ea1cdd47f/_background_noise_/doing_the_dishes.wav&#x27;</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">10</span>}<!-- 
HTML_TAG_END --></pre></div> <p>The <code>audio</code> column contains a 1-dimensional <code>array</code> of the speech signal that must be called to load and resample the audio file. The <code>label</code> column is an integer that represents the utterance class. Create a dictionary that maps a label name to an integer and vice versa. The mapping will help the model recover the label name from the label number:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>labels = ks[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">&quot;label&quot;</span>].names <span class="hljs-meta">&gt;&gt;&gt; </span>label2id, id2label = <span class="hljs-built_in">dict</span>(), <span class="hljs-built_in">dict</span>() <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(labels): <span class="hljs-meta">... </span> label2id[label] = <span class="hljs-built_in">str</span>(i) <span class="hljs-meta">... </span> id2label[<span class="hljs-built_in">str</span>(i)] = label<!-- HTML_TAG_END --></pre></div> <p>Now you can convert the label number to a label name for more information:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>id2label[<span class="hljs-built_in">str</span>(<span class="hljs-number">10</span>)] <span class="hljs-string">&#x27;_silence_&#x27;</span><!-- HTML_TAG_END --></pre></div> <p>Each keyword - or label - corresponds to a number; <code>10</code> indicates <code>silence</code> in the example above.</p> <h2 
class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Load the Wav2Vec2 feature extractor to process the audio signal:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full 
transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The preprocessing function needs to:</p> <ol><li>Call the <code>audio</code> column to load and if necessary resample the audio file.</li> <li>Check the sampling rate of the audio file matches the sampling rate of the audio data a model was pretrained with. You can find this information on the Wav2Vec2 <a href="(https://huggingface.co/facebook/wav2vec2-base)">model card</a>.</li> <li>Set a maximum input length so longer inputs are batched without being truncated.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight 
rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> audio_arrays = [x[<span class="hljs-string">&quot;array&quot;</span>] <span class="hljs-keyword">for</span> x <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;audio&quot;</span>]] <span class="hljs-meta">... </span> inputs = feature_extractor( <span class="hljs-meta">... </span> audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=<span class="hljs-number">16000</span>, truncation=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> inputs<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map" rel="nofollow"><code>map</code></a> function to apply the preprocessing function over the entire dataset. You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once. 
Remove the columns you don’t need:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>encoded_ks = ks.<span class="hljs-built_in">map</span>(preprocess_function, remove_columns=[<span class="hljs-string">&quot;audio&quot;</span>, <span class="hljs-string">&quot;file&quot;</span>], batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with Trainer </span></h2> <p>Load Wav2Vec2 with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForAudioClassification">AutoModelForAudioClassification</a>. Specify the number of labels, and pass the model the mapping between label number and label class:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; 
border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForAudioClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>num_labels = <span class="hljs-built_in">len</span>(id2label) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForAudioClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>, num_labels=num_labels, label2id=label2id, id2label=id2label <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, datasets, and feature extractor.</li> <li>Call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out 
opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> save_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">3e-5</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">5</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=encoded_ks[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... 
</span> eval_dataset=encoded_ks[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> tokenizer=feature_extractor, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for audio classification, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/audio_classification.ipynb" rel="nofollow">PyTorch notebook</a>.</p></div> <script type="module" data-hydrate="1h108g8"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1h108g8"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/tasks/audio_classification.mdx-cab2e6ac.js") ], params: {} } }); </script>
443
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/tasks/image_classification.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;image-classification&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-food101-dataset&quot;,&quot;title&quot;:&quot;Load Food-101 dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;finetune-with-trainer&quot;,&quot;title&quot;:&quot;Fine-tune with Trainer&quot;}],&quot;title&quot;:&quot;Image classification&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/tasks/image_classification.mdx-e10d3b71.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="image-classification" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#image-classification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Image classification </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/tjAIM7BOYhw" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Image classification assigns a label or class to an image. Unlike text or audio classification, the inputs are the pixel values that represent an image. 
There are many uses for image classification, like detecting damage after a disaster, monitoring crop health, or helping screen medical images for signs of disease.</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/docs/transformers/v4.16.2/en/model_doc/vit" rel="nofollow">ViT</a> on the <a href="https://huggingface.co/datasets/food101" rel="nofollow">Food-101</a> dataset to classify a food item in an image.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the image classification <a href="https://huggingface.co/tasks/audio-classification" rel="nofollow">task page</a> for more information about its associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-food101-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-food101-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load Food-101 dataset </span></h2> <p>Load only the first 5000 images of the 
Food-101 dataset from the 🤗 Datasets library since it is pretty large:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>food = load_dataset(<span class="hljs-string">&quot;food101&quot;</span>, split=<span class="hljs-string">&quot;train[:5000]&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Split this dataset into a train and test set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>food = food.train_test_split(test_size=<span class="hljs-number">0.2</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform 
-translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>food[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;image&#x27;</span>: &lt;PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at <span class="hljs-number">0x7F52AFC8AC50</span>&gt;, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">79</span>}<!-- HTML_TAG_END --></pre></div> <p>The <code>image</code> field contains a PIL image, and each <code>label</code> is an integer that represents a class. Create a dictionary that maps a label name to an integer and vice versa. The mapping will help the model recover the label name from the label number:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 
transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>labels = food[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">&quot;label&quot;</span>].names <span class="hljs-meta">&gt;&gt;&gt; </span>label2id, id2label = <span class="hljs-built_in">dict</span>(), <span class="hljs-built_in">dict</span>() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(labels): <span class="hljs-meta">... </span> label2id[label] = <span class="hljs-built_in">str</span>(i) <span class="hljs-meta">... </span> id2label[<span class="hljs-built_in">str</span>(i)] = label<!-- HTML_TAG_END --></pre></div> <p>Now you can convert the label number to a label name for more information:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute 
bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>id2label[<span class="hljs-built_in">str</span>(<span class="hljs-number">79</span>)] <span class="hljs-string">&#x27;prime_rib&#x27;</span><!-- HTML_TAG_END --></pre></div> <p>Each food class - or label - corresponds to a number; <code>79</code> indicates a prime rib in the example above.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Load the ViT feature extractor to process the image into a tensor:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained(<span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Apply several image transformations to the dataset to make the model more robust against overfitting. Here you’ll use torchvision’s <a href="https://pytorch.org/vision/stable/transforms.html" rel="nofollow"><code>transforms</code></a> module. 
Crop a random part of the image, resize it, and normalize it with the image mean and standard deviation:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torchvision.transforms <span class="hljs-keyword">import</span> RandomResizedCrop, Compose, Normalize, ToTensor <span class="hljs-meta">&gt;&gt;&gt; </span>normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) <span class="hljs-meta">&gt;&gt;&gt; </span>_transforms = Compose([RandomResizedCrop(feature_extractor.size), ToTensor(), normalize])<!-- HTML_TAG_END --></pre></div> <p>Create a preprocessing function that will apply the transforms and return the <code>pixel_values</code> - the inputs to the model - of the image:</p> <div class="code-block relative"><div 
class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">transforms</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> examples[<span class="hljs-string">&quot;pixel_values&quot;</span>] = [_transforms(img.convert(<span class="hljs-string">&quot;RGB&quot;</span>)) <span class="hljs-keyword">for</span> img <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;image&quot;</span>]] <span class="hljs-meta">... </span> <span class="hljs-keyword">del</span> examples[<span class="hljs-string">&quot;image&quot;</span>] <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> examples<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Dataset’s <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html?#datasets.Dataset.with_transform" rel="nofollow"><code>with_transform</code></a> method to apply the transforms over the entire dataset. The transforms are applied on-the-fly when you load an element of the dataset:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>food = food.with_transform(transforms)<!-- HTML_TAG_END --></pre></div> <p>Use <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DefaultDataCollator">DefaultDataCollator</a> to create a batch of examples. 
Unlike other data collators in 🤗 Transformers, the DefaultDataCollator does not apply additional preprocessing such as padding.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with Trainer </span></h2> <p>Load ViT with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForImageClassification">AutoModelForImageClassification</a>. Specify the number of labels, and pass the model the mapping between label number and label class:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForImageClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForImageClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;google/vit-base-patch16-224-in21k&quot;</span>, <span class="hljs-meta">... </span> num_labels=<span class="hljs-built_in">len</span>(labels), <span class="hljs-meta">... </span> id2label=id2label, <span class="hljs-meta">... </span> label2id=label2id, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>. It is important you don’t remove unused columns because this will drop the <code>image</code> column. Without the <code>image</code> column, you can’t create <code>pixel_values</code>. 
Set <code>remove_unused_columns=False</code> to prevent this behavior!</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, datasets, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... 
</span> evaluation_strategy=<span class="hljs-string">&quot;steps&quot;</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">4</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> save_steps=<span class="hljs-number">100</span>, <span class="hljs-meta">... </span> eval_steps=<span class="hljs-number">100</span>, <span class="hljs-meta">... </span> logging_steps=<span class="hljs-number">10</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-4</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">2</span>, <span class="hljs-meta">... </span> remove_unused_columns=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span> train_dataset=food[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=food[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=feature_extractor, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for image classification, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/image_classification.ipynb" rel="nofollow">PyTorch notebook</a>.</p></div> <script type="module" data-hydrate="13ogidu"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="13ogidu"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/tasks/image_classification.mdx-e10d3b71.js") ], params: {} } }); </script>
444
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/tasks/summarization.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;summarization&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-billsum-dataset&quot;,&quot;title&quot;:&quot;Load BillSum dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;finetune-with-trainer&quot;,&quot;title&quot;:&quot;Fine-tune with Trainer&quot;},{&quot;local&quot;:&quot;finetune-with-tensorflow&quot;,&quot;title&quot;:&quot;Fine-tune with TensorFlow&quot;}],&quot;title&quot;:&quot;Summarization&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/tasks/summarization.mdx-34e69920.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <h1 class="relative group"><a id="summarization" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#summarization"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Summarization </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/yHnr5Dk2zCI" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Summarization creates a shorter version of a document or an article that captures all the important information. Along with translation, it is another example of a task that can be formulated as a sequence-to-sequence task. Summarization can be:</p> <ul><li>Extractive: extract the most relevant information from a document.</li> <li>Abstractive: generate new text that captures the most relevant information. 
</li></ul> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/t5-small" rel="nofollow">T5</a> on the California state bill subset of the <a href="https://huggingface.co/datasets/billsum" rel="nofollow">BillSum</a> dataset for abstractive summarization.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the summarization <a href="https://huggingface.co/tasks/summarization" rel="nofollow">task page</a> for more information about its associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-billsum-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-billsum-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load BillSum dataset </span></h2> <p>Load the BillSum dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer 
focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>billsum = load_dataset(<span class="hljs-string">&quot;billsum&quot;</span>, split=<span class="hljs-string">&quot;ca_test&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Split this dataset into a train and test set:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>billsum = billsum.train_test_split(test_size=<span class="hljs-number">0.2</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: 
transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>billsum[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;summary&#x27;</span>: <span class="hljs-string">&#x27;Existing law authorizes state agencies to enter into contracts for the acquisition of goods or services upon approval by the Department of General Services. Existing law sets forth various requirements and prohibitions for those contracts, including, but not limited to, a prohibition on entering into contracts for the acquisition of goods or services of $100,000 or more with a contractor that discriminates between spouses and domestic partners or same-sex and different-sex couples in the provision of benefits. Existing law provides that a contract entered into in violation of those requirements and prohibitions is void and authorizes the state or any person acting on behalf of the state to bring a civil action seeking a determination that a contract is in violation and therefore void. Under existing law, a willful violation of those requirements and prohibitions is a misdemeanor.\nThis bill would also prohibit a state agency from entering into contracts for the acquisition of goods or services of $100,000 or more with a contractor that discriminates between employees on the basis of gender identity in the provision of benefits, as specified. By expanding the scope of a crime, this bill would impose a state-mandated local program.\nThe California Constitution requires the state to reimburse local agencies and school districts for certain costs mandated by the state. 
Statutory provisions establish procedures for making that reimbursement.\nThis bill would provide that no reimbursement is required by this act for a specified reason.&#x27;</span>, <span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;The people of the State of California do enact as follows:\n\n\nSECTION 1.\nSection 10295.35 is added to the Public Contract Code, to read:\n10295.35.\n(a) (1) Notwithstanding any other law, a state agency shall not enter into any contract for the acquisition of goods or services in the amount of one hundred thousand dollars ($100,000) or more with a contractor that, in the provision of benefits, discriminates between employees on the basis of an employee’s or dependent’s actual or perceived gender identity, including, but not limited to, the employee’s or dependent’s identification as transgender.\n(2) For purposes of this section, “contract” includes contracts with a cumulative amount of one hundred thousand dollars ($100,000) or more per contractor in each fiscal year.\n(3) For purposes of this section, an employee health plan is discriminatory if the plan is not consistent with Section 1365.5 of the Health and Safety Code and Section 10140 of the Insurance Code.\n(4) The requirements of this section shall apply only to those portions of a contractor’s operations that occur under any of the following conditions:\n(A) Within the state.\n(B) On real property outside the state if the property is owned by the state or if the state has a right to occupy the property, and if the contractor’s presence at that location is connected to a contract with the state.\n(C) Elsewhere in the United States where work related to a state contract is being performed.\n(b) Contractors shall treat as confidential, to the maximum extent allowed by law or by the requirement of the contractor’s insurance provider, any request by an employee or applicant for employment benefits or any documentation of eligibility for benefits 
submitted by an employee or applicant for employment.\n(c) After taking all reasonable measures to find a contractor that complies with this section, as determined by the state agency, the requirements of this section may be waived under any of the following circumstances:\n(1) There is only one prospective contractor willing to enter into a specific contract with the state agency.\n(2) The contract is necessary to respond to an emergency, as determined by the state agency, that endangers the public health, welfare, or safety, or the contract is necessary for the provision of essential services, and no entity that complies with the requirements of this section capable of responding to the emergency is immediately available.\n(3) The requirements of this section violate, or are inconsistent with, the terms or conditions of a grant, subvention, or agreement, if the agency has made a good faith attempt to change the terms or conditions of any grant, subvention, or agreement to authorize application of this section.\n(4) The contractor is providing wholesale or bulk water, power, or natural gas, the conveyance or transmission of the same, or ancillary services, as required for ensuring reliable services in accordance with good utility practice, if the purchase of the same cannot practically be accomplished through the standard competitive bidding procedures and the contractor is not providing direct retail services to end users.\n(d) (1) A contractor shall not be deemed to discriminate in the provision of benefits if the contractor, in providing the benefits, pays the actual costs incurred in obtaining the benefit.\n(2) If a contractor is unable to provide a certain benefit, despite taking reasonable measures to do so, the contractor shall not be deemed to discriminate in the provision of benefits.\n(e) (1) Every contract subject to this chapter shall contain a statement by which the contractor certifies that the contractor is in compliance with this section.\n(2) The 
department or other contracting agency shall enforce this section pursuant to its existing enforcement powers.\n(3) (A) If a contractor falsely certifies that it is in compliance with this section, the contract with that contractor shall be subject to Article 9 (commencing with Section 10420), unless, within a time period specified by the department or other contracting agency, the contractor provides to the department or agency proof that it has complied, or is in the process of complying, with this section.\n(B) The application of the remedies or penalties contained in Article 9 (commencing with Section 10420) to a contract subject to this chapter shall not preclude the application of any existing remedies otherwise available to the department or other contracting agency under its existing enforcement powers.\n(f) Nothing in this section is intended to regulate the contracting practices of any local jurisdiction.\n(g) This section shall be construed so as not to conflict with applicable federal laws, rules, or regulations. In the event that a court or agency of competent jurisdiction holds that federal law, rule, or regulation invalidates any clause, sentence, paragraph, or section of this code or the application thereof to any person or circumstances, it is the intent of the state that the court or agency sever that clause, sentence, paragraph, or section so that the remainder of this section shall remain in effect.\nSEC. 2.\nSection 10295.35 of the Public Contract Code shall not be construed to create any new enforcement authority or responsibility in the Department of General Services or any other contracting agency.\nSEC. 
3.\nNo reimbursement is required by this act pursuant to Section 6 of Article XIII\u2009B of the California Constitution because the only costs that may be incurred by a local agency or school district will be incurred because this act creates a new crime or infraction, eliminates a crime or infraction, or changes the penalty for a crime or infraction, within the meaning of Section 17556 of the Government Code, or changes the definition of a crime within the meaning of Section 6 of Article XIII\u2009B of the California Constitution.&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;An act to add Section 10295.35 to the Public Contract Code, relating to public contracts.&#x27;</span>}<!-- HTML_TAG_END --></pre></div> <p>The <code>text</code> field is the input and the <code>summary</code> field is the target.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Load the T5 tokenizer to process <code>text</code> and <code>summary</code>:</p> <div 
class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The preprocessing function needs to:</p> <ol><li>Prefix the input with a prompt so T5 knows this is a summarization task. 
Some models capable of multiple NLP tasks require prompting for specific tasks.</li> <li>Use a context manager with the <code>as_target_tokenizer()</code> function to parallelize tokenization of inputs and labels.</li> <li>Truncate sequences to be no longer than the maximum length set by the <code>max_length</code> parameter.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>prefix = <span class="hljs-string">&quot;summarize: &quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... 
</span> inputs = [prefix + doc <span class="hljs-keyword">for</span> doc <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;text&quot;</span>]] <span class="hljs-meta">... </span> model_inputs = tokenizer(inputs, max_length=<span class="hljs-number">1024</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> tokenizer.as_target_tokenizer(): <span class="hljs-meta">... </span> labels = tokenizer(examples[<span class="hljs-string">&quot;summary&quot;</span>], max_length=<span class="hljs-number">128</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> model_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels[<span class="hljs-string">&quot;input_ids&quot;</span>] <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> model_inputs<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map" rel="nofollow"><code>map</code></a> function to apply the preprocessing function over the entire dataset. 
You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_billsum = billsum.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorForSeq2Seq">DataCollatorForSeq2Seq</a> to create a batch of examples. It will also <em>dynamically pad</em> your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path 
d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForSeq2Seq <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-trainer"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with Trainer </span></h2> <p>Load T5 with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForSeq2SeqLM">AutoModelForSeq2SeqLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Seq2SeqTrainingArguments">Seq2SeqTrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Seq2SeqTrainer">Seq2SeqTrainer</a> along with the model, dataset, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = Seq2SeqTrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">1</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Seq2SeqTrainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... 
</span> train_dataset=tokenized_billsum[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_billsum[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-tensorflow" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-tensorflow"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with TensorFlow </span></h2> <p>To fine-tune a model in TensorFlow is just as easy, with only a few differences.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a 
href="training#finetune-with-keras">here</a>!</p></div> <p>Convert your datasets to the <code>tf.data.Dataset</code> format with <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset" rel="nofollow"><code>to_tf_dataset</code></a>. Specify inputs and labels in <code>columns</code>, whether to shuffle the dataset order, batch size, and the data collator:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_billsum[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... 
</span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_test_set = tokenized_billsum[<span class="hljs-string">&quot;test&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Set up an optimizer function, learning rate schedule, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer, AdamWeightDecay <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamWeightDecay(learning_rate=<span class="hljs-number">2e-5</span>, weight_decay_rate=<span class="hljs-number">0.01</span>)<!-- HTML_TAG_END --></pre></div> <p>Load T5 with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForSeq2SeqLM">TFAutoModelForSeq2SeqLM</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;t5-small&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 
opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for summarization, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/summarization.ipynb" rel="nofollow">PyTorch notebook</a> or <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/summarization-tf.ipynb" rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="189zp8r"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="189zp8r"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/tasks/summarization.mdx-34e69920.js") ], params: {} } }); </script>
445
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/tasks/asr.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;automatic-speech-recognition&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-timit-dataset&quot;,&quot;title&quot;:&quot;Load TIMIT dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;finetune-with-trainer&quot;,&quot;title&quot;:&quot;Fine-tune with Trainer&quot;}],&quot;title&quot;:&quot;Automatic speech recognition&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/tasks/asr.mdx-b010f320.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="automatic-speech-recognition" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#automatic-speech-recognition"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Automatic speech recognition </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/TksaY_FDgnk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Automatic speech recognition (ASR) converts a speech signal to text. It is an example of a sequence-to-sequence task, going from a sequence of audio inputs to textual outputs. 
Voice assistants like Siri and Alexa utilize ASR models to assist users.</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/facebook/wav2vec2-base" rel="nofollow">Wav2Vec2</a> on the <a href="https://huggingface.co/datasets/timit_asr" rel="nofollow">TIMIT</a> dataset to transcribe audio to text.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the automatic speech recognition <a href="https://huggingface.co/tasks/automatic-speech-recognition" rel="nofollow">task page</a> for more information about its associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-timit-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-timit-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load TIMIT dataset </span></h2> <p>Load the TIMIT dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex 
items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>timit = load_dataset(<span class="hljs-string">&quot;timit_asr&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>timit DatasetDict({ train: Dataset({ features: [<span class="hljs-string">&#x27;file&#x27;</span>, <span class="hljs-string">&#x27;audio&#x27;</span>, <span class="hljs-string">&#x27;text&#x27;</span>, <span class="hljs-string">&#x27;phonetic_detail&#x27;</span>, <span class="hljs-string">&#x27;word_detail&#x27;</span>, <span class="hljs-string">&#x27;dialect_region&#x27;</span>, <span class="hljs-string">&#x27;sentence_type&#x27;</span>, <span class="hljs-string">&#x27;speaker_id&#x27;</span>, <span class="hljs-string">&#x27;id&#x27;</span>], num_rows: <span class="hljs-number">4620</span> }) test: Dataset({ features: [<span class="hljs-string">&#x27;file&#x27;</span>, <span class="hljs-string">&#x27;audio&#x27;</span>, <span class="hljs-string">&#x27;text&#x27;</span>, <span class="hljs-string">&#x27;phonetic_detail&#x27;</span>, <span class="hljs-string">&#x27;word_detail&#x27;</span>, <span class="hljs-string">&#x27;dialect_region&#x27;</span>, <span class="hljs-string">&#x27;sentence_type&#x27;</span>, <span class="hljs-string">&#x27;speaker_id&#x27;</span>, <span class="hljs-string">&#x27;id&#x27;</span>], num_rows: <span class="hljs-number">1680</span> }) })<!-- HTML_TAG_END --></pre></div> <p>While the dataset contains a 
lot of helpful information, like <code>dialect_region</code> and <code>sentence_type</code>, you will focus on the <code>audio</code> and <code>text</code> fields in this guide. Remove the other columns:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>timit = timit.remove_columns( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;phonetic_detail&quot;</span>, <span class="hljs-string">&quot;word_detail&quot;</span>, <span class="hljs-string">&quot;dialect_region&quot;</span>, <span class="hljs-string">&quot;id&quot;</span>, <span class="hljs-string">&quot;sentence_type&quot;</span>, <span class="hljs-string">&quot;speaker_id&quot;</span>] <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Take a look at the example again:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>timit[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;audio&#x27;</span>: {<span class="hljs-string">&#x27;array&#x27;</span>: array([-<span class="hljs-number">2.1362305e-04</span>, <span class="hljs-number">6.1035156e-05</span>, <span class="hljs-number">3.0517578e-05</span>, ..., -<span class="hljs-number">3.0517578e-05</span>, -<span class="hljs-number">9.1552734e-05</span>, -<span class="hljs-number">6.1035156e-05</span>], dtype=float32), <span class="hljs-string">&#x27;path&#x27;</span>: <span 
class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/404950a46da14eac65eb4e2a8317b1372fb3971d980d91d5d5b221275b1fd7e0/data/TRAIN/DR4/MMDM0/SI681.WAV&#x27;</span>, <span class="hljs-string">&#x27;sampling_rate&#x27;</span>: <span class="hljs-number">16000</span>}, <span class="hljs-string">&#x27;file&#x27;</span>: <span class="hljs-string">&#x27;/root/.cache/huggingface/datasets/downloads/extracted/404950a46da14eac65eb4e2a8317b1372fb3971d980d91d5d5b221275b1fd7e0/data/TRAIN/DR4/MMDM0/SI681.WAV&#x27;</span>, <span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;Would such an act of refusal be useful?&#x27;</span>}<!-- HTML_TAG_END --></pre></div> <p>The <code>audio</code> column contains a 1-dimensional <code>array</code> of the speech signal that must be called to load and resample the audio file.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Load the Wav2Vec2 processor to process the audio signal and transcribed text:</p> <div 
class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoProcessor <span class="hljs-meta">&gt;&gt;&gt; </span>processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;facebook/wav2vec2-base&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The preprocessing function needs to:</p> <ol><li>Call the <code>audio</code> column to load and resample the audio file.</li> <li>Extract the <code>input_values</code> from the audio file.</li> <li>Typically, when you call the processor, you call the feature extractor. 
Since you also want to tokenize text, instruct the processor to call the tokenizer instead with a context manager.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">prepare_dataset</span>(<span class="hljs-params">batch</span>): <span class="hljs-meta">... </span> audio = batch[<span class="hljs-string">&quot;audio&quot;</span>] <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;input_values&quot;</span>] = processor(audio[<span class="hljs-string">&quot;array&quot;</span>], sampling_rate=audio[<span class="hljs-string">&quot;sampling_rate&quot;</span>]).input_values[<span class="hljs-number">0</span>] <span class="hljs-meta">... 
</span> batch[<span class="hljs-string">&quot;input_length&quot;</span>] = <span class="hljs-built_in">len</span>(batch[<span class="hljs-string">&quot;input_values&quot;</span>]) <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> processor.as_target_processor(): <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;labels&quot;</span>] = processor(batch[<span class="hljs-string">&quot;text&quot;</span>]).input_ids <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map" rel="nofollow"><code>map</code></a> function to apply the preprocessing function over the entire dataset. You can speed up the map function by increasing the number of processes with <code>num_proc</code>. Remove the columns you don’t need:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 
border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>timit = timit.<span class="hljs-built_in">map</span>(prepare_dataset, remove_columns=timit.column_names[<span class="hljs-string">&quot;train&quot;</span>], num_proc=<span class="hljs-number">4</span>)<!-- HTML_TAG_END --></pre></div> <p>🤗 Transformers doesn’t have a data collator for automatic speech recognition, so you will need to create one. You can adapt the <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorWithPadding">DataCollatorWithPadding</a> to create a batch of examples for automatic speech recognition. It will also dynamically pad your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <p>Unlike other data collators, this specific data collator needs to apply a different padding method to <code>input_values</code> and <code>labels</code>. 
You can apply a different padding method with a context manager:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> dataclasses <span class="hljs-keyword">import</span> dataclass, field <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">Any</span>, <span class="hljs-type">Dict</span>, <span class="hljs-type">List</span>, <span class="hljs-type">Optional</span>, <span class="hljs-type">Union</span> <span class="hljs-meta">&gt;&gt;&gt; </span>@dataclass <span class="hljs-meta">... 
</span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DataCollatorCTCWithPadding</span>: <span class="hljs-meta">... </span> processor: AutoProcessor <span class="hljs-meta">... </span> padding: <span class="hljs-type">Union</span>[<span class="hljs-built_in">bool</span>, <span class="hljs-built_in">str</span>] = <span class="hljs-literal">True</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__call__</span>(<span class="hljs-params">self, features: <span class="hljs-type">List</span>[<span class="hljs-type">Dict</span>[<span class="hljs-built_in">str</span>, <span class="hljs-type">Union</span>[<span class="hljs-type">List</span>[<span class="hljs-built_in">int</span>], torch.Tensor]]]</span>) -&gt; <span class="hljs-type">Dict</span>[<span class="hljs-built_in">str</span>, torch.Tensor]: <span class="hljs-meta">... </span> <span class="hljs-comment"># split inputs and labels since they have to be of different lengths and need</span> <span class="hljs-meta">... </span> <span class="hljs-comment"># different padding methods</span> <span class="hljs-meta">... </span> input_features = [{<span class="hljs-string">&quot;input_values&quot;</span>: feature[<span class="hljs-string">&quot;input_values&quot;</span>]} <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... </span> label_features = [{<span class="hljs-string">&quot;input_ids&quot;</span>: feature[<span class="hljs-string">&quot;labels&quot;</span>]} <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... </span> batch = self.processor.pad( <span class="hljs-meta">... </span> input_features, <span class="hljs-meta">... </span> padding=self.padding, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... 
</span> ) <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> self.processor.as_target_processor(): <span class="hljs-meta">... </span> labels_batch = self.processor.pad( <span class="hljs-meta">... </span> label_features, <span class="hljs-meta">... </span> padding=self.padding, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> <span class="hljs-comment"># replace padding with -100 to ignore loss correctly</span> <span class="hljs-meta">... </span> labels = labels_batch[<span class="hljs-string">&quot;input_ids&quot;</span>].masked_fill(labels_batch.attention_mask.ne(<span class="hljs-number">1</span>), -<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;labels&quot;</span>] = labels <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> batch<!-- HTML_TAG_END --></pre></div> <p>Create a batch of examples and dynamically pad them with <code>DataCollatorForCTCWithPadding</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal 
shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorCTCWithPadding(processor=processor, padding=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with Trainer </span></h2> <p>Load Wav2Vec2 with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForCTC">AutoModelForCTC</a>. 
For <code>ctc_loss_reduction</code>, it is often better to use the average instead of the default summation:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCTC, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCTC.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;facebook/wav2vec-base&quot;</span>, <span class="hljs-meta">... </span> ctc_loss_reduction=<span class="hljs-string">&quot;mean&quot;</span>, <span class="hljs-meta">... </span> pad_token_id=processor.tokenizer.pad_token_id, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, datasets, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform 
-translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> group_by_length=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;steps&quot;</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> fp16=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> gradient_checkpointing=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">1e-4</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.005</span>, <span class="hljs-meta">... </span> save_total_limit=<span class="hljs-number">2</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=timit[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=timit[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=processor.feature_extractor, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for automatic speech recognition, take a look at this blog <a href="https://huggingface.co/blog/fine-tune-wav2vec2-english" rel="nofollow">post</a> for English ASR and this <a href="https://huggingface.co/blog/fine-tune-xlsr-wav2vec2" rel="nofollow">post</a> for multilingual ASR.</p></div> <script type="module" data-hydrate="trmq53"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="trmq53"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/tasks/asr.mdx-b010f320.js") ], params: {} } }); </script>
446
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/tasks/token_classification.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;token-classification&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-wnut-17-dataset&quot;,&quot;title&quot;:&quot;Load WNUT 17 dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;finetune-with-trainer&quot;,&quot;title&quot;:&quot;Fine-tune with Trainer&quot;},{&quot;local&quot;:&quot;finetune-with-tensorflow&quot;,&quot;title&quot;:&quot;Fine-tune with TensorFlow&quot;}],&quot;title&quot;:&quot;Token classification&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/tasks/token_classification.mdx-5446e455.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <h1 class="relative group"><a id="token-classification" class="header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#token-classification"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Token classification </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/wVHdVlPScxA" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Token classification assigns a label to individual tokens in a sentence. One of the most common token classification tasks is Named Entity Recognition (NER). NER attempts to find a label for each entity in a sentence, such as a person, location, or organization. 
</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/distilbert-base-uncased" rel="nofollow">DistilBERT</a> on the <a href="https://huggingface.co/datasets/wnut_17" rel="nofollow">WNUT 17</a> dataset to detect new entities.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the token classification <a href="https://huggingface.co/tasks/token-classification" rel="nofollow">task page</a> for more information about other forms of token classification and their associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-wnut-17-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-wnut-17-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load WNUT 17 dataset </span></h2> <p>Load the WNUT 17 dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>wnut = load_dataset(<span class="hljs-string">&quot;wnut_17&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>wnut[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;0&#x27;</span>, <span class="hljs-string">&#x27;ner_tags&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">8</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;tokens&#x27;</span>: [<span class="hljs-string">&#x27;@paulwalk&#x27;</span>, <span 
class="hljs-string">&#x27;It&#x27;</span>, <span class="hljs-string">&quot;&#x27;s&quot;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;I&#x27;</span>, <span class="hljs-string">&quot;&#x27;m&quot;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Empire&#x27;</span>, <span class="hljs-string">&#x27;State&#x27;</span>, <span class="hljs-string">&#x27;Building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;ESB&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>] }<!-- HTML_TAG_END --></pre></div> <p>Each number in <code>ner_tags</code> represents an entity. 
Convert the number to a label name for more information:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>label_list = wnut[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">f&quot;ner_tags&quot;</span>].feature.names <span class="hljs-meta">&gt;&gt;&gt; </span>label_list [ <span class="hljs-string">&quot;O&quot;</span>, <span class="hljs-string">&quot;B-corporation&quot;</span>, <span class="hljs-string">&quot;I-corporation&quot;</span>, <span class="hljs-string">&quot;B-creative-work&quot;</span>, <span class="hljs-string">&quot;I-creative-work&quot;</span>, <span class="hljs-string">&quot;B-group&quot;</span>, <span class="hljs-string">&quot;I-group&quot;</span>, <span class="hljs-string">&quot;B-location&quot;</span>, <span 
class="hljs-string">&quot;I-location&quot;</span>, <span class="hljs-string">&quot;B-person&quot;</span>, <span class="hljs-string">&quot;I-person&quot;</span>, <span class="hljs-string">&quot;B-product&quot;</span>, <span class="hljs-string">&quot;I-product&quot;</span>, ]<!-- HTML_TAG_END --></pre></div> <p>The <code>ner_tag</code> describes an entity, such as a corporation, location, or person. The letter that prefixes each <code>ner_tag</code> indicates the token position of the entity:</p> <ul><li><code>B-</code> indicates the beginning of an entity.</li> <li><code>I-</code> indicates a token is contained inside the same entity (e.g., the <code>State</code> token is a part of an entity like <code>Empire State Building</code>).</li> <li><code>0</code> indicates the token doesn’t correspond to any entity.</li></ul> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/iY2AZYdZAr0" title="YouTube video player" 
frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Load the DistilBERT tokenizer to process the <code>tokens</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Since the input has already been split into words, set <code>is_split_into_words=True</code> to tokenize the words into subwords:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center 
relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_input = tokenizer(example[<span class="hljs-string">&quot;tokens&quot;</span>], is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens = tokenizer.convert_ids_to_tokens(tokenized_input[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens [<span class="hljs-string">&#x27;[CLS]&#x27;</span>, <span class="hljs-string">&#x27;@&#x27;</span>, <span class="hljs-string">&#x27;paul&#x27;</span>, <span class="hljs-string">&#x27;##walk&#x27;</span>, <span class="hljs-string">&#x27;it&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;s&#x27;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, 
<span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;i&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;m&#x27;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;empire&#x27;</span>, <span class="hljs-string">&#x27;state&#x27;</span>, <span class="hljs-string">&#x27;building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;es&#x27;</span>, <span class="hljs-string">&#x27;##b&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;[SEP]&#x27;</span>]<!-- HTML_TAG_END --></pre></div> <p>Adding the special tokens <code>[CLS]</code> and <code>[SEP]</code> and subword tokenization creates a mismatch between the input and labels. A single word corresponding to a single label may be split into two subwords. You will need to realign the tokens and labels by:</p> <ol><li>Mapping all tokens to their corresponding word with the <a href="https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.word_ids" rel="nofollow"><code>word_ids</code></a> method.</li> <li>Assigning the label <code>-100</code> to the special tokens <code>[CLS]</code> and <code>[SEP]</code> so the PyTorch loss function ignores them.</li> <li>Only labeling the first token of a given word. 
Assign <code>-100</code> to other subtokens from the same word.</li></ol> <p>Here is how you can create a function to realign the tokens and labels, and truncate sequences to be no longer than DistilBERT’s maximum input length::</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">tokenize_and_align_labels</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> tokenized_inputs = tokenizer(examples[<span class="hljs-string">&quot;tokens&quot;</span>], truncation=<span class="hljs-literal">True</span>, is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> labels = [] <span class="hljs-meta">... 
</span> <span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(examples[<span class="hljs-string">f&quot;ner_tags&quot;</span>]): <span class="hljs-meta">... </span> word_ids = tokenized_inputs.word_ids(batch_index=i) <span class="hljs-comment"># Map tokens to their respective word.</span> <span class="hljs-meta">... </span> previous_word_idx = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> label_ids = [] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> word_idx <span class="hljs-keyword">in</span> word_ids: <span class="hljs-comment"># Set the special tokens to -100.</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> word_idx <span class="hljs-keyword">is</span> <span class="hljs-literal">None</span>: <span class="hljs-meta">... </span> label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">elif</span> word_idx != previous_word_idx: <span class="hljs-comment"># Only label the first token of a given word.</span> <span class="hljs-meta">... </span> label_ids.append(label[word_idx]) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> previous_word_idx = word_idx <span class="hljs-meta">... </span> labels.append(label_ids) <span class="hljs-meta">... </span> tokenized_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> tokenized_inputs<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map" rel="nofollow"><code>map</code></a> function to tokenize and align the labels over the entire dataset. 
You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_wnut = wnut.<span class="hljs-built_in">map</span>(tokenize_and_align_labels, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorForTokenClassification">DataCollatorForTokenClassification</a> to create a batch of examples. It will also <em>dynamically pad</em> your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path 
d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-trainer"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with Trainer </span></h2> <p>Load DistilBERT with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForTokenClassification">AutoModelForTokenClassification</a> along with the number of expected labels:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform 
-translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForTokenClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">14</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, dataset, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... 
</span> train_dataset=tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_wnut[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-tensorflow" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-tensorflow"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with TensorFlow </span></h2> <p>To fine-tune a model in TensorFlow is just as easy, with only a few differences.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a 
href="training#finetune-with-keras">here</a>!</p></div> <p>Convert your datasets to the <code>tf.data.Dataset</code> format with <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset" rel="nofollow"><code>to_tf_dataset</code></a>. Specify inputs and labels in <code>columns</code>, whether to shuffle the dataset order, batch size, and the data collator:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... 
</span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = tokenized_wnut[<span class="hljs-string">&quot;validation&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Set up an optimizer function, learning rate schedule, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 
transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_epochs = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_steps = (<span class="hljs-built_in">len</span>(tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_train_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, lr_schedule = create_optimizer( <span class="hljs-meta">... </span> init_lr=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_train_steps=num_train_steps, <span class="hljs-meta">... </span> weight_decay_rate=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> num_warmup_steps=<span class="hljs-number">0</span>, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Load DistilBERT with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForTokenClassification">TFAutoModelForTokenClassification</a> along with the number of expected labels:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" 
rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for token classification, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification.ipynb" rel="nofollow">PyTorch notebook</a> or <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb" rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="b1nied"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="b1nied"]').parentNode, paths: 
{"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/tasks/token_classification.mdx-5446e455.js") ], params: {} } }); </script>
447
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/tasks/multiple_choice.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;multiple-choice&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-swag-dataset&quot;,&quot;title&quot;:&quot;Load SWAG dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;finetune-with-trainer&quot;,&quot;title&quot;:&quot;Fine-tune with Trainer&quot;},{&quot;local&quot;:&quot;finetune-with-tensorflow&quot;,&quot;title&quot;:&quot;Fine-tune with TensorFlow&quot;}],&quot;title&quot;:&quot;Multiple choice&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/tasks/multiple_choice.mdx-7c0ab1b6.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <h1 class="relative group"><a id="multiple-choice" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#multiple-choice"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Multiple choice </span></h1> <p>A multiple choice task is similar to question answering, except several candidate answers are provided along with a context. The model is trained to select the correct answer from multiple inputs given a context.</p> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/bert-base-uncased" rel="nofollow">BERT</a> on the <code>regular</code> configuration of the <a href="https://huggingface.co/datasets/swag" rel="nofollow">SWAG</a> dataset to select the best answer given multiple options and some context.</p> <h2 class="relative group"><a id="load-swag-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-swag-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load SWAG dataset </span></h2> <p>Load the SWAG dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>swag = load_dataset(<span class="hljs-string">&quot;swag&quot;</span>, <span class="hljs-string">&quot;regular&quot;</span>)<!-- 
HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>swag[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;ending0&#x27;</span>: <span class="hljs-string">&#x27;passes by walking down the street playing their instruments.&#x27;</span>, <span class="hljs-string">&#x27;ending1&#x27;</span>: <span class="hljs-string">&#x27;has heard approaching them.&#x27;</span>, <span class="hljs-string">&#x27;ending2&#x27;</span>: <span class="hljs-string">&quot;arrives and they&#x27;re outside dancing and asleep.&quot;</span>, <span class="hljs-string">&#x27;ending3&#x27;</span>: <span class="hljs-string">&#x27;turns the lead singer watches the performance.&#x27;</span>, <span 
class="hljs-string">&#x27;fold-ind&#x27;</span>: <span class="hljs-string">&#x27;3416&#x27;</span>, <span class="hljs-string">&#x27;gold-source&#x27;</span>: <span class="hljs-string">&#x27;gold&#x27;</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;sent1&#x27;</span>: <span class="hljs-string">&#x27;Members of the procession walk down the street holding small horn brass instruments.&#x27;</span>, <span class="hljs-string">&#x27;sent2&#x27;</span>: <span class="hljs-string">&#x27;A drum line&#x27;</span>, <span class="hljs-string">&#x27;startphrase&#x27;</span>: <span class="hljs-string">&#x27;Members of the procession walk down the street holding small horn brass instruments. A drum line&#x27;</span>, <span class="hljs-string">&#x27;video-id&#x27;</span>: <span class="hljs-string">&#x27;anetv_jkn6uvmqwh4&#x27;</span>}<!-- HTML_TAG_END --></pre></div> <p>The <code>sent1</code> and <code>sent2</code> fields show how a sentence begins, and each <code>ending</code> field shows how a sentence could end. 
Given the sentence beginning, the model must pick the correct sentence ending as indicated by the <code>label</code> field.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <p>Load the BERT tokenizer to process the start of each sentence and the four possible endings:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" 
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>The preprocessing function needs to do:</p> <ol><li>Make four copies of the <code>sent1</code> field so you can combine each of them with <code>sent2</code> to recreate how a sentence starts.</li> <li>Combine <code>sent2</code> with each of the four possible sentence endings.</li> <li>Flatten these two lists so you can tokenize them, and then unflatten them afterward so each example has a corresponding <code>input_ids</code>, <code>attention_mask</code>, and <code>labels</code> field.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>ending_names = [<span class="hljs-string">&quot;ending0&quot;</span>, <span class="hljs-string">&quot;ending1&quot;</span>, <span class="hljs-string">&quot;ending2&quot;</span>, <span class="hljs-string">&quot;ending3&quot;</span>] <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> first_sentences = [[context] * <span class="hljs-number">4</span> <span class="hljs-keyword">for</span> context <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;sent1&quot;</span>]] <span class="hljs-meta">... </span> question_headers = examples[<span class="hljs-string">&quot;sent2&quot;</span>] <span class="hljs-meta">... </span> second_sentences = [ <span class="hljs-meta">... </span> [<span class="hljs-string">f&quot;<span class="hljs-subst">{header}</span> <span class="hljs-subst">{examples[end][i]}</span>&quot;</span> <span class="hljs-keyword">for</span> end <span class="hljs-keyword">in</span> ending_names] <span class="hljs-keyword">for</span> i, header <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(question_headers) <span class="hljs-meta">... </span> ] <span class="hljs-meta">... 
</span> first_sentences = <span class="hljs-built_in">sum</span>(first_sentences, []) <span class="hljs-meta">... </span> second_sentences = <span class="hljs-built_in">sum</span>(second_sentences, []) <span class="hljs-meta">... </span> tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> {k: [v[i : i + <span class="hljs-number">4</span>] <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(<span class="hljs-number">0</span>, <span class="hljs-built_in">len</span>(v), <span class="hljs-number">4</span>)] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> tokenized_examples.items()}<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map" rel="nofollow"><code>map</code></a> function to apply the preprocessing function over the entire dataset. 
You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->tokenized_swag = swag.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>🤗 Transformers doesn’t have a data collator for multiple choice, so you will need to create one. You can adapt the <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DataCollatorWithPadding">DataCollatorWithPadding</a> to create a batch of examples for multiple choice. It will also <em>dynamically pad</em> your text and labels to the length of the longest element in its batch, so they are a uniform length. 
While it is possible to pad your text in the <code>tokenizer</code> function by setting <code>padding=True</code>, dynamic padding is more efficient.</p> <p><code>DataCollatorForMultipleChoice</code> will flatten all the model inputs, apply padding, and then unflatten the results:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" 
fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> dataclasses <span class="hljs-keyword">import</span> dataclass <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.tokenization_utils_base <span class="hljs-keyword">import</span> PreTrainedTokenizerBase, PaddingStrategy <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> typing <span class="hljs-keyword">import</span> <span class="hljs-type">Optional</span>, <span 
class="hljs-type">Union</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>@dataclass <span class="hljs-meta">... </span><span class="hljs-keyword">class</span> <span class="hljs-title class_">DataCollatorForMultipleChoice</span>: <span class="hljs-meta">... </span> <span class="hljs-string">&quot;&quot;&quot; <span class="hljs-meta">... </span> Data collator that will dynamically pad the inputs for multiple choice received. <span class="hljs-meta">... </span> &quot;&quot;&quot;</span> <span class="hljs-meta">... </span> tokenizer: PreTrainedTokenizerBase <span class="hljs-meta">... </span> padding: <span class="hljs-type">Union</span>[<span class="hljs-built_in">bool</span>, <span class="hljs-built_in">str</span>, PaddingStrategy] = <span class="hljs-literal">True</span> <span class="hljs-meta">... </span> max_length: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> pad_to_multiple_of: <span class="hljs-type">Optional</span>[<span class="hljs-built_in">int</span>] = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">__call__</span>(<span class="hljs-params">self, features</span>): <span class="hljs-meta">... </span> label_name = <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">if</span> <span class="hljs-string">&quot;label&quot;</span> <span class="hljs-keyword">in</span> features[<span class="hljs-number">0</span>].keys() <span class="hljs-keyword">else</span> <span class="hljs-string">&quot;labels&quot;</span> <span class="hljs-meta">... </span> labels = [feature.pop(label_name) <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features] <span class="hljs-meta">... 
</span> batch_size = <span class="hljs-built_in">len</span>(features) <span class="hljs-meta">... </span> num_choices = <span class="hljs-built_in">len</span>(features[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">... </span> flattened_features = [ <span class="hljs-meta">... </span> [{k: v[i] <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> feature.items()} <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_choices)] <span class="hljs-keyword">for</span> feature <span class="hljs-keyword">in</span> features <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span> flattened_features = <span class="hljs-built_in">sum</span>(flattened_features, []) <span class="hljs-meta">... </span> batch = self.tokenizer.pad( <span class="hljs-meta">... </span> flattened_features, <span class="hljs-meta">... </span> padding=self.padding, <span class="hljs-meta">... </span> max_length=self.max_length, <span class="hljs-meta">... </span> pad_to_multiple_of=self.pad_to_multiple_of, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> batch = {k: v.view(batch_size, num_choices, -<span class="hljs-number">1</span>) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> batch.items()} <span class="hljs-meta">... </span> batch[<span class="hljs-string">&quot;labels&quot;</span>] = torch.tensor(labels, dtype=torch.int64) <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> batch<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with Trainer </span></h2> <p>Load BERT with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForMultipleChoice">AutoModelForMultipleChoice</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForMultipleChoice, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Trainer, take a look at the basic tutorial <a href="training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, dataset, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">5e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... 
</span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_swag[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_swag[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer), <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-tensorflow" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-tensorflow"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with TensorFlow </span></h2> <p>To fine-tune a model in TensorFlow is just as easy, with only a few differences.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 
dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Convert your datasets to the <code>tf.data.Dataset</code> format with <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset" rel="nofollow"><code>to_tf_dataset</code></a>. Specify inputs in <code>columns</code>, targets in <code>label_cols</code>, whether to shuffle the dataset order, batch size, and the data collator:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForMultipleChoice(tokenizer=tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_swag[<span 
class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>], <span class="hljs-meta">... </span> label_cols=[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=batch_size, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = tokenized_swag[<span class="hljs-string">&quot;validation&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>], <span class="hljs-meta">... </span> label_cols=[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=batch_size, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Set up an optimizer function, learning rate schedule, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_epochs = <span class="hljs-number">2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = (<span class="hljs-built_in">len</span>(tokenized_swag[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_train_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer(init_lr=<span 
class="hljs-number">5e-5</span>, num_warmup_steps=<span class="hljs-number">0</span>, num_train_steps=total_train_steps)<!-- HTML_TAG_END --></pre></div> <p>Load BERT with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForMultipleChoice">TFAutoModelForMultipleChoice</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForMultipleChoice <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForMultipleChoice.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" 
rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>( <span class="hljs-meta">... </span> optimizer=optimizer, <span class="hljs-meta">... </span> loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=<span class="hljs-literal">True</span>), <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">2</span>)<!-- HTML_TAG_END --></pre></div> <script type="module" data-hydrate="z4513d"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="z4513d"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ 
import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/tasks/multiple_choice.mdx-7c0ab1b6.js") ], params: {} } }); </script>
448
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/tasks/question_answering.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;question-answering&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;load-squad-dataset&quot;,&quot;title&quot;:&quot;Load SQuAD dataset&quot;},{&quot;local&quot;:&quot;preprocess&quot;,&quot;title&quot;:&quot;Preprocess&quot;},{&quot;local&quot;:&quot;finetune-with-trainer&quot;,&quot;title&quot;:&quot;Fine-tune with Trainer&quot;},{&quot;local&quot;:&quot;finetune-with-tensorflow&quot;,&quot;title&quot;:&quot;Fine-tune with TensorFlow&quot;}],&quot;title&quot;:&quot;Question answering&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/tasks/question_answering.mdx-8babb2cc.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Youtube-27813aed.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlockFw-27a176a0.js"> <h1 class="relative group"><a id="question-answering" class="header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#question-answering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Question answering </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/ajPx5LwJD-I" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Question answering tasks return an answer given a question. 
There are two common forms of question answering:</p> <ul><li>Extractive: extract the answer from the given context.</li> <li>Abstractive: generate an answer from the context that correctly answers the question.</li></ul> <p>This guide will show you how to fine-tune <a href="https://huggingface.co/distilbert-base-uncased" rel="nofollow">DistilBERT</a> on the <a href="https://huggingface.co/datasets/squad" rel="nofollow">SQuAD</a> dataset for extractive question answering.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>See the question answering <a href="https://huggingface.co/tasks/question-answering" rel="nofollow">task page</a> for more information about other forms of question answering and their associated models, datasets, and metrics.</p></div> <h2 class="relative group"><a id="load-squad-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#load-squad-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Load SQuAD dataset 
</span></h2> <p>Load the SQuAD dataset from the 🤗 Datasets library:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>squad = load_dataset(<span class="hljs-string">&quot;squad&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Then take a look at an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>squad[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;answers&#x27;</span>: {<span class="hljs-string">&#x27;answer_start&#x27;</span>: [<span class="hljs-number">515</span>], <span class="hljs-string">&#x27;text&#x27;</span>: [<span class="hljs-string">&#x27;Saint Bernadette Soubirous&#x27;</span>]}, <span class="hljs-string">&#x27;context&#x27;</span>: <span class="hljs-string">&#x27;Architecturally, the school has a Catholic character. Atop the Main Building\&#x27;s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend &quot;Venite Ad Me Omnes&quot;. Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. 
At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.&#x27;</span>, <span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;5733be284776f41900661182&#x27;</span>, <span class="hljs-string">&#x27;question&#x27;</span>: <span class="hljs-string">&#x27;To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?&#x27;</span>, <span class="hljs-string">&#x27;title&#x27;</span>: <span class="hljs-string">&#x27;University_of_Notre_Dame&#x27;</span> }<!-- HTML_TAG_END --></pre></div> <p>The <code>answers</code> field is a dictionary containing the starting position of the answer and the <code>text</code> of the answer.</p> <h2 class="relative group"><a id="preprocess" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preprocess"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preprocess </span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/qgaM0weJHpA" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; 
gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Load the DistilBERT tokenizer to process the <code>question</code> and <code>context</code> fields:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>There are a few preprocessing steps particular to question answering that you should be aware of:</p> <ol><li>Some examples in a dataset may have a very long <code>context</code> that exceeds the maximum input length of the model. 
Truncate only the <code>context</code> by setting <code>truncation=&quot;only_second&quot;</code>.</li> <li>Next, map the start and end positions of the answer to the original <code>context</code> by setting <code>return_offset_mapping=True</code>.</li> <li>With the mapping in hand, you can find the start and end tokens of the answer. Use the <a href="https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.sequence_ids" rel="nofollow"><code>sequence_ids</code></a> method to find which part of the offset corresponds to the <code>question</code> and which corresponds to the <code>context</code>.</li></ol> <p>Here is how you can create a function to truncate and map the start and end tokens of the answer to the <code>context</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> questions = [q.strip() <span class="hljs-keyword">for</span> q <span class="hljs-keyword">in</span> examples[<span class="hljs-string">&quot;question&quot;</span>]] <span class="hljs-meta">... </span> inputs = tokenizer( <span class="hljs-meta">... </span> questions, <span class="hljs-meta">... </span> examples[<span class="hljs-string">&quot;context&quot;</span>], <span class="hljs-meta">... </span> max_length=<span class="hljs-number">384</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-string">&quot;only_second&quot;</span>, <span class="hljs-meta">... </span> return_offsets_mapping=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> padding=<span class="hljs-string">&quot;max_length&quot;</span>, <span class="hljs-meta">... </span> ) <span class="hljs-meta">... </span> offset_mapping = inputs.pop(<span class="hljs-string">&quot;offset_mapping&quot;</span>) <span class="hljs-meta">... </span> answers = examples[<span class="hljs-string">&quot;answers&quot;</span>] <span class="hljs-meta">... </span> start_positions = [] <span class="hljs-meta">... </span> end_positions = [] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> i, offset <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(offset_mapping): <span class="hljs-meta">... </span> answer = answers[i] <span class="hljs-meta">... </span> start_char = answer[<span class="hljs-string">&quot;answer_start&quot;</span>][<span class="hljs-number">0</span>] <span class="hljs-meta">... 
</span> end_char = answer[<span class="hljs-string">&quot;answer_start&quot;</span>][<span class="hljs-number">0</span>] + <span class="hljs-built_in">len</span>(answer[<span class="hljs-string">&quot;text&quot;</span>][<span class="hljs-number">0</span>]) <span class="hljs-meta">... </span> sequence_ids = inputs.sequence_ids(i) <span class="hljs-meta">... </span> <span class="hljs-comment"># Find the start and end of the context</span> <span class="hljs-meta">... </span> idx = <span class="hljs-number">0</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">while</span> sequence_ids[idx] != <span class="hljs-number">1</span>: <span class="hljs-meta">... </span> idx += <span class="hljs-number">1</span> <span class="hljs-meta">... </span> context_start = idx <span class="hljs-meta">... </span> <span class="hljs-keyword">while</span> sequence_ids[idx] == <span class="hljs-number">1</span>: <span class="hljs-meta">... </span> idx += <span class="hljs-number">1</span> <span class="hljs-meta">... </span> context_end = idx - <span class="hljs-number">1</span> <span class="hljs-meta">... </span> <span class="hljs-comment"># If the answer is not fully inside the context, label it (0, 0)</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> offset[context_start][<span class="hljs-number">0</span>] &gt; end_char <span class="hljs-keyword">or</span> offset[context_end][<span class="hljs-number">1</span>] &lt; start_char: <span class="hljs-meta">... </span> start_positions.append(<span class="hljs-number">0</span>) <span class="hljs-meta">... </span> end_positions.append(<span class="hljs-number">0</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> <span class="hljs-comment"># Otherwise it&#x27;s the start and end token positions</span> <span class="hljs-meta">... </span> idx = context_start <span class="hljs-meta">... 
</span> <span class="hljs-keyword">while</span> idx &lt;= context_end <span class="hljs-keyword">and</span> offset[idx][<span class="hljs-number">0</span>] &lt;= start_char: <span class="hljs-meta">... </span> idx += <span class="hljs-number">1</span> <span class="hljs-meta">... </span> start_positions.append(idx - <span class="hljs-number">1</span>) <span class="hljs-meta">... </span> idx = context_end <span class="hljs-meta">... </span> <span class="hljs-keyword">while</span> idx &gt;= context_start <span class="hljs-keyword">and</span> offset[idx][<span class="hljs-number">1</span>] &gt;= end_char: <span class="hljs-meta">... </span> idx -= <span class="hljs-number">1</span> <span class="hljs-meta">... </span> end_positions.append(idx + <span class="hljs-number">1</span>) <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;start_positions&quot;</span>] = start_positions <span class="hljs-meta">... </span> inputs[<span class="hljs-string">&quot;end_positions&quot;</span>] = end_positions <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> inputs<!-- HTML_TAG_END --></pre></div> <p>Use 🤗 Datasets <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map" rel="nofollow"><code>map</code></a> function to apply the preprocessing function over the entire dataset. You can speed up the <code>map</code> function by setting <code>batched=True</code> to process multiple elements of the dataset at once. 
Remove the columns you don’t need:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_squad = squad.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>, remove_columns=squad[<span class="hljs-string">&quot;train&quot;</span>].column_names)<!-- HTML_TAG_END --></pre></div> <p>Use <a href="/docs/transformers/pr_16143/en/main_classes/data_collator#transformers.DefaultDataCollator">DefaultDataCollator</a> to create a batch of examples. 
Unlike other data collators in 🤗 Transformers, the <code>DefaultDataCollator</code> does not apply additional preprocessing such as padding.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="Copy code excerpt to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><div><div class="bg-white leading-none border border-gray-100 rounded-lg inline-flex p-0.5 text-sm mb-4 select-none"><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-l false"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path 
d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <p class="!m-0 ">Pytorch</p> </button><button class="flex justify-center py-1.5 px-2.5 focus:outline-none rounded-r text-gray-500 filter grayscale"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <p class="!m-0 ">TensorFlow</p> </button></div></div><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with Trainer </span></h2> <p>Load DistilBERT with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.AutoModelForQuestionAnswering">AutoModelForQuestionAnswering</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForQuestionAnswering, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with the <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>, take a look at the basic tutorial <a href="training#finetune-with-trainer">here</a>!</p></div> <p>At this point, only three steps remain:</p> <ol><li>Define your training hyperparameters in <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.TrainingArguments">TrainingArguments</a>.</li> <li>Pass the training arguments to <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a> along with the model, dataset, tokenizer, and data collator.</li> <li>Call <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer.train">train()</a> to fine-tune your model.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... 
</span> eval_dataset=tokenized_squad[<span class="hljs-string">&quot;validation&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="finetune-with-tensorflow" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-with-tensorflow"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune with TensorFlow </span></h2> <p>To fine-tune a model in TensorFlow is just as easy, with only a few differences.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If you aren’t familiar with fine-tuning a model with Keras, take a look at the basic tutorial <a href="training#finetune-with-keras">here</a>!</p></div> <p>Convert your datasets to the <code>tf.data.Dataset</code> format with <a 
href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset" rel="nofollow"><code>to_tf_dataset</code></a>. Specify inputs and the start and end positions of an answer in <code>columns</code>, whether to shuffle the dataset order, batch size, and the data collator:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;start_positions&quot;</span>, <span class="hljs-string">&quot;end_positions&quot;</span>], <span class="hljs-meta">... 
</span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = tokenized_squad[<span class="hljs-string">&quot;validation&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;start_positions&quot;</span>, <span class="hljs-string">&quot;end_positions&quot;</span>], <span class="hljs-meta">... </span> dummy_labels=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Set up an optimizer function, learning rate schedule, and some training hyperparameters:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">2</span> <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = (<span class="hljs-built_in">len</span>(tokenized_squad[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer( <span class="hljs-meta">... 
</span> init_lr=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_warmup_steps=<span class="hljs-number">0</span>, <span class="hljs-meta">... </span> num_train_steps=total_train_steps, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Load DistilBERT with <a href="/docs/transformers/pr_16143/en/model_doc/auto#transformers.TFAutoModelForQuestionAnswering">TFAutoModelForQuestionAnswering</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForQuestionAnswering(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END 
--></pre></div> <p>Configure the model for training with <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Call <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> to fine-tune the model:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition 
duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>For a more in-depth example of how to fine-tune a model for question answering, take a look at the corresponding <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb" rel="nofollow">PyTorch notebook</a> or <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering-tf.ipynb" rel="nofollow">TensorFlow notebook</a>.</p></div> <script type="module" data-hydrate="b6vnn0"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ 
target: document.querySelector('[data-hydrate="b6vnn0"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/tasks/question_answering.mdx-8babb2cc.js") ], params: {} } }); </script>
449
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/internal/trainer_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;utilities-for-trainer&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.EvalPrediction&quot;,&quot;title&quot;:&quot;Utilities&quot;},{&quot;local&quot;:&quot;transformers.trainer_callback.CallbackHandler&quot;,&quot;title&quot;:&quot;Callbacks internals&quot;},{&quot;local&quot;:&quot;transformers.trainer_pt_utils.DistributedTensorGatherer&quot;,&quot;title&quot;:&quot;Distributed Evaluation&quot;},{&quot;local&quot;:&quot;transformers.HfArgumentParser&quot;,&quot;title&quot;:&quot;Distributed Evaluation&quot;},{&quot;local&quot;:&quot;transformers.debug_utils.DebugUnderflowOverflow&quot;,&quot;title&quot;:&quot;Debug Utilities&quot;}],&quot;title&quot;:&quot;Utilities for Trainer&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/internal/trainer_utils.mdx-b7a1b196.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="utilities-for-trainer" class="header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#utilities-for-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities for Trainer </span></h1> <p>This page lists all the utility functions used by <a href="/docs/transformers/pr_16143/en/main_classes/trainer#transformers.Trainer">Trainer</a>.</p> <p>Most of those are only useful if you are studying the code of the Trainer in the library.</p> <h2 class="relative group"><a id="transformers.EvalPrediction" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.EvalPrediction"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.EvalPrediction"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">EvalPrediction</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.EvalPrediction" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.EvalPrediction"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_utils.py#L67" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">predictions<span class="opacity-60">: typing.Union[numpy.ndarray, typing.Tuple[numpy.ndarray]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">label_ids<span class="opacity-60">: typing.Union[numpy.ndarray, typing.Tuple[numpy.ndarray]]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.EvalPrediction.predictions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.EvalPrediction.predictions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>predictions</strong> (<code>np.ndarray</code>) &#x2014; Predictions of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.EvalPrediction.label_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.EvalPrediction.label_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>label_ids</strong> (<code>np.ndarray</code>) &#x2014; Targets to be matched.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Evaluation output (always contains labels), to be used to compute metrics.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.IntervalStrategy"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">IntervalStrategy</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.IntervalStrategy" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.IntervalStrategy"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_utils.py#L115" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">names<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qualname<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type<span 
class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>An enumeration.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.set_seed"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.set_seed</span></h4><!-- HTML_TAG_END --> <a id="transformers.set_seed" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.set_seed"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_utils.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">seed<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.set_seed.seed" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.set_seed.seed"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>seed</strong> (<code>int</code>) &#x2014; The seed to set.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Helper function for reproducible behavior to set the seed in <code>random</code>, <code>numpy</code>, <code>torch</code> and/or <code>tf</code> (if installed).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.torch_distributed_zero_first"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" 
d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.torch_distributed_zero_first</span></h4><!-- HTML_TAG_END --> <a id="transformers.torch_distributed_zero_first" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.torch_distributed_zero_first"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L212" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">local_rank<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.torch_distributed_zero_first.local_rank" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.torch_distributed_zero_first.local_rank"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local_rank</strong> (<code>int</code>) &#x2014; The rank of the local process.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Decorator to make all processes in distributed training wait for each local_master to do something.</p></div> <h2 class="relative group"><a id="transformers.trainer_callback.CallbackHandler" 
class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_callback.CallbackHandler"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Callbacks internals </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.trainer_callback.CallbackHandler"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 
1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.trainer_callback.</span><span class="font-semibold">CallbackHandler</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.trainer_callback.CallbackHandler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.trainer_callback.CallbackHandler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_callback.py#L284" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">callbacks<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokenizer<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">optimizer<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">lr_scheduler<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Internal class that just calls the list of callbacks in order.</p></div> <h2 class="relative group"><a id="transformers.trainer_pt_utils.DistributedTensorGatherer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.DistributedTensorGatherer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Distributed Evaluation 
</span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.trainer_pt_utils.DistributedTensorGatherer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.trainer_pt_utils.</span><span class="font-semibold">DistributedTensorGatherer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.trainer_pt_utils.DistributedTensorGatherer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.trainer_pt_utils.DistributedTensorGatherer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L338" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">world_size<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_samples<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">make_multiple_of<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding_index<span class="opacity-60"> = -100</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.DistributedTensorGatherer.world_size" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.DistributedTensorGatherer.world_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>world_size</strong> (<code>int</code>) &#x2014; The number of processes used in the distributed training.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.DistributedTensorGatherer.num_samples" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.DistributedTensorGatherer.num_samples"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 
0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_samples</strong> (<code>int</code>) &#x2014; The number of samples in our dataset.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.DistributedTensorGatherer.make_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.DistributedTensorGatherer.make_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>make_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument (by adding samples).<!-- 
HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.trainer_pt_utils.DistributedTensorGatherer.padding_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.trainer_pt_utils.DistributedTensorGatherer.padding_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding_index</strong> (<code>int</code>, <em>optional</em>, defaults to -100) &#x2014; The padding index to use if the arrays don&#x2019;t all have the same sequence length.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks.</p> <p>If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every step, our sampler will generate the following indices:</p> <p><code>[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]</code></p> <p>to get something of size a multiple of 3 (so that each process gets the same dataset length). 
Then process 0, 1 and 2 will be responsible of making predictions for the following samples:</p> <ul><li>P0: <code>[0, 1, 2, 3, 4, 5]</code></li> <li>P1: <code>[6, 7, 8, 9, 10, 11]</code></li> <li>P2: <code>[12, 13, 14, 15, 0, 1]</code></li></ul> <p>The first batch treated on each process will be</p> <ul><li>P0: <code>[0, 1]</code></li> <li>P1: <code>[6, 7]</code></li> <li>P2: <code>[12, 13]</code></li></ul> <p>So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to the following indices:</p> <p><code>[0, 1, 6, 7, 12, 13]</code></p> <p>If we directly concatenate our results without taking any precautions, the user will then get the predictions for the indices in this order at the end of the prediction loop:</p> <p><code>[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]</code></p> <p>For some reason, that’s not going to roll their boat. This class is there to solve that problem.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.trainer_pt_utils.DistributedTensorGatherer.add_arrays"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 
11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>add_arrays</span></h4><!-- HTML_TAG_END --> <a id="transformers.trainer_pt_utils.DistributedTensorGatherer.add_arrays" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.trainer_pt_utils.DistributedTensorGatherer.add_arrays"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L399" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">arrays<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Add <code>arrays</code> to the internal storage, Will initialize the storage to the full size at the first arrays passed so that if we’re bound to get an OOM, it happens at the beginning.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.trainer_pt_utils.DistributedTensorGatherer.finalize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>finalize</span></h4><!-- 
HTML_TAG_END --> <a id="transformers.trainer_pt_utils.DistributedTensorGatherer.finalize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.trainer_pt_utils.DistributedTensorGatherer.finalize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/trainer_pt_utils.py#L435" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras to get each process a dataset of the same length).</p></div></div> <h2 class="relative group"><a id="transformers.HfArgumentParser" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HfArgumentParser"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Distributed Evaluation </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HfArgumentParser"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span 
class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">HfArgumentParser</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.HfArgumentParser" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HfArgumentParser"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/hf_argparser.py#L44" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dataclass_types<span class="opacity-60">: typing.Union[DataClassType, typing.Iterable[DataClassType]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This subclass of 
<code>argparse.ArgumentParser</code> uses type hints on dataclasses to generate arguments.</p> <p>The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed) arguments to the parser after initialization and you’ll get the output back after parsing as an additional namespace. Optional: To create sub argument groups use the <code>_argument_group_name</code> attribute in the dataclass.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HfArgumentParser.parse_args_into_dataclasses"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>parse_args_into_dataclasses</span></h4><!-- HTML_TAG_END --> <a id="transformers.HfArgumentParser.parse_args_into_dataclasses" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HfArgumentParser.parse_args_into_dataclasses"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/hf_argparser.py#L161" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_remaining_strings<span class="opacity-60"> = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">look_for_args_file<span 
class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args_filename<span class="opacity-60"> = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>Tuple consisting of</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.HfArgumentParser.parse_args_into_dataclasses.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>Tuple consisting of</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <ul> <li>the dataclass instances in the same order as they were passed to the initializer.abspath</li> <li>if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser after initialization.</li> <li>The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)</li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Parse command-line args into instances of the specified dataclass types.</p> <p>This relies on argparse’s <code>ArgumentParser.parse_known_args</code>. 
See the doc at: docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HfArgumentParser.parse_dict"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>parse_dict</span></h4><!-- HTML_TAG_END --> <a id="transformers.HfArgumentParser.parse_dict" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HfArgumentParser.parse_dict"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/hf_argparser.py#L234" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">args<span class="opacity-60">: dict</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Alternative helper method that does not use <code>argparse</code> at all, instead uses a dict and populating the dataclass types.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HfArgumentParser.parse_json_file"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 
16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>parse_json_file</span></h4><!-- HTML_TAG_END --> <a id="transformers.HfArgumentParser.parse_json_file" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HfArgumentParser.parse_json_file"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex 
items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/hf_argparser.py#L220" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">json_file<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Alternative helper method that does not use <code>argparse</code> at all, instead loading a json file and populating the dataclass types.</p></div></div> <h2 class="relative group"><a id="transformers.debug_utils.DebugUnderflowOverflow" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.debug_utils.DebugUnderflowOverflow"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Debug Utilities </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 
-ml-4 pt-3 px-2.5" id="transformers.debug_utils.DebugUnderflowOverflow"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.debug_utils.</span><span class="font-semibold">DebugUnderflowOverflow</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.debug_utils.DebugUnderflowOverflow" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.debug_utils.DebugUnderflowOverflow"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/debug_utils.py#L28" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_frames_to_save<span class="opacity-60"> = 21</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">trace_batch_nums<span class="opacity-60"> = []</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">abort_after_batch_num<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.debug_utils.DebugUnderflowOverflow.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.debug_utils.DebugUnderflowOverflow.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>nn.Module</code>) &#x2014; The model to debug.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.debug_utils.DebugUnderflowOverflow.max_frames_to_save" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.debug_utils.DebugUnderflowOverflow.max_frames_to_save"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_frames_to_save</strong> (<code>int</code>, <em>optional</em>, defaults to 21) &#x2014; How many frames back to record<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.debug_utils.DebugUnderflowOverflow.trace_batch_nums(List[int]," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.debug_utils.DebugUnderflowOverflow.trace_batch_nums(List[int],"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>trace_batch_nums(<code>List[int]</code>,</strong> <em>optional</em>, defaults to <code>[]</code>) &#x2014; Which batch numbers to trace (turns detection off)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.debug_utils.DebugUnderflowOverflow.abort_after_batch_num" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.debug_utils.DebugUnderflowOverflow.abort_after_batch_num"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>abort_after_batch_num</strong> (`int&#x201C;, <em>optional</em>) &#x2014; Whether to abort after a certain batch number has finished<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This debug class helps detect and understand where the model starts getting very large or very small, and more importantly <code>nan</code> or <code>inf</code> weight and activation elements.</p> <p>There are 2 working modes:</p> <ol><li>Underflow/overflow detection (default)</li> <li>Specific batch absolute min/max tracing without detection</li></ol> <p>Mode 1: Underflow/overflow detection</p> <p>To activate the underflow/overflow detection, initialize the object with the model :</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" 
type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->debug_overflow = DebugUnderflowOverflow(model)<!-- HTML_TAG_END --></pre></div> <p>then run the training as normal and if <code>nan</code> or <code>inf</code> gets detected in at least one of the weight, input or output elements this module will throw an exception and will print <code>max_frames_to_save</code> frames that lead to this event, each frame reporting</p> <ol><li>the fully qualified module name plus the class name whose <code>forward</code> was run</li> <li>the absolute min and max value of all elements for each module weights, and the inputs and output</li></ol> <p>For example, here is the header and the last few frames in detection report for <code>google/mt5-small</code> run in fp16 mixed precision :</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" 
aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-attribute">Detected</span> inf/nan during batch_number=<span class="hljs-number">0</span> <span class="hljs-attribute">Last</span> <span class="hljs-number">21</span> forward frames: <span class="hljs-attribute">abs</span> min abs max metadata<span class="hljs-meta"> [...]</span> <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wi_0 Linear <span class="hljs-attribute">2</span>.<span class="hljs-number">17</span>e-<span class="hljs-number">07</span> <span class="hljs-number">4</span>.<span class="hljs-number">50</span>e+<span class="hljs-number">00</span> weight <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">2</span>.<span class="hljs-number">68</span>e-<span class="hljs-number">06</span> <span class="hljs-number">3</span>.<span class="hljs-number">70</span>e+<span 
class="hljs-number">01</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wi_1 Linear <span class="hljs-attribute">8</span>.<span class="hljs-number">08</span>e-<span class="hljs-number">07</span> <span class="hljs-number">2</span>.<span class="hljs-number">66</span>e+<span class="hljs-number">01</span> weight <span class="hljs-attribute">1</span>.<span class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">1</span>.<span class="hljs-number">27</span>e-<span class="hljs-number">04</span> <span class="hljs-number">2</span>.<span class="hljs-number">37</span>e+<span class="hljs-number">02</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense.wo Linear <span class="hljs-attribute">1</span>.<span class="hljs-number">01</span>e-<span class="hljs-number">06</span> <span class="hljs-number">6</span>.<span class="hljs-number">44</span>e+<span class="hljs-number">00</span> weight <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> <span class="hljs-number">9</span>.<span class="hljs-number">74</span>e+<span class="hljs-number">03</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.DenseReluDense T5DenseGatedGeluDense <span class="hljs-attribute">1</span>.<span 
class="hljs-number">79</span>e-<span class="hljs-number">06</span> <span class="hljs-number">4</span>.<span class="hljs-number">65</span>e+<span class="hljs-number">00</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> output <span class="hljs-attribute">encoder</span>.block.<span class="hljs-number">2</span>.layer.<span class="hljs-number">1</span>.dropout Dropout <span class="hljs-attribute">3</span>.<span class="hljs-number">18</span>e-<span class="hljs-number">04</span> <span class="hljs-number">6</span>.<span class="hljs-number">27</span>e+<span class="hljs-number">04</span> input[<span class="hljs-number">0</span>] <span class="hljs-attribute">0</span>.<span class="hljs-number">00</span>e+<span class="hljs-number">00</span> inf output<!-- HTML_TAG_END --></pre></div> <p>You can see here, that <code>T5DenseGatedGeluDense.forward</code> resulted in output activations, whose absolute max value was around 62.7K, which is very close to fp16’s top limit of 64K. In the next frame we have <code>Dropout</code> which renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than 64K, and we get an overlow.</p> <p>As you can see it’s the previous frames that we need to look into when the numbers start going into very large for fp16 numbers.</p> <p>The tracking is done in a forward hook, which gets invoked immediately after <code>forward</code> has completed.</p> <p>By default the last 21 frames are printed. You can change the default to adjust for your needs. 
For example :</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=<span class="hljs-number">100</span>)<!-- HTML_TAG_END --></pre></div> <p>To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next section.</p> <p>Mode 2. Specific batch absolute min/max tracing without detection</p> <p>The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.</p> <p>Let’s say you want to watch the absolute min and max values for all the ingredients of each <code>forward</code> call of a given batch, and only do that for batches 1 and 3. 
Then you instantiate this class as :</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[<span class="hljs-number">1</span>, <span class="hljs-number">3</span>])<!-- HTML_TAG_END --></pre></div> <p>And now full batches 1 and 3 will be traced using the same format as explained above. 
Batches are 0-indexed.</p> <p>This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward right to that area.</p> <p>Early stopping:</p> <p>You can also specify the batch number after which to stop the training, with :</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[<span class="hljs-number">1</span>, <span class="hljs-number">3</span>], abort_after_batch_num=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> <p>This feature is mainly useful in the tracing mode, but you can use it for any mode.</p> <p><strong>Performance</strong>:</p> <p>As this module measures absolute <code>min</code>/`<code>max</code> of each weight of the model on every forward it’ll slow the training down. 
Therefore remember to turn it off once the debugging needs have been met.</p></div> <script type="module" data-hydrate="1dxtcok"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1dxtcok"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/internal/trainer_utils.mdx-b7a1b196.js") ], params: {} } }); </script>
450
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/internal/modeling_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;custom-layers-and-utilities&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.Conv1D&quot;,&quot;title&quot;:&quot;Pytorch custom modules&quot;},{&quot;local&quot;:&quot;transformers.apply_chunking_to_forward&quot;,&quot;title&quot;:&quot;PyTorch Helper Functions&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_utils.TFConv1D&quot;,&quot;title&quot;:&quot;TensorFlow custom layers&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_utils.TFCausalLanguageModelingLoss&quot;,&quot;title&quot;:&quot;TensorFlow loss functions&quot;},{&quot;local&quot;:&quot;transformers.modeling_tf_utils.get_initializer&quot;,&quot;title&quot;:&quot;TensorFlow Helper Functions&quot;}],&quot;title&quot;:&quot;Custom Layers and Utilities&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/internal/modeling_utils.mdx-1b04e493.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" 
href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="custom-layers-and-utilities" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#custom-layers-and-utilities"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Custom Layers and Utilities </span></h1> <p>This page lists all the custom layers used by the library, as well as the utility functions it provides for modeling.</p> <p>Most of those are only useful if you are studying the code of the models in the library.</p> <h2 class="relative group"><a id="transformers.Conv1D" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conv1D"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pytorch custom modules </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Conv1D"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Conv1D</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Conv1D" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Conv1D"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1851" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">nf<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">nx<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Conv1D.nf" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.Conv1D.nf"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>nf</strong> (<code>int</code>) &#x2014; The number of output features.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.Conv1D.nx" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Conv1D.nx"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>nx</strong> (<code>int</code>) &#x2014; The number of input features.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).</p> <p>Basically works like a linear layer but the weights are transposed.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.PoolerStartLogits"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">PoolerStartLogits</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.PoolerStartLogits" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.PoolerStartLogits"><svg 
class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1877" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerStartLogits.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerStartLogits.config"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Compute SQuAD start logits from sequence hidden states.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.PoolerStartLogits.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" 
fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.PoolerStartLogits.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.PoolerStartLogits.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1890" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">p_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerStartLogits.forward.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerStartLogits.forward.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; The final hidden states of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerStartLogits.forward.p_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerStartLogits.forward.p_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 
1.0 means token should be masked.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.PoolerStartLogits.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The start logits for SQuAD.</p> <!-- HTML_TAG_END --></p></div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.PoolerEndLogits"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">PoolerEndLogits</span></span></h3><!-- HTML_TAG_END --> <a 
id="transformers.modeling_utils.PoolerEndLogits" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.PoolerEndLogits"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1915" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerEndLogits.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerEndLogits.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model and the <code>layer_norm_eps</code> to use.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Compute SQuAD end logits from sequence hidden states.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.PoolerEndLogits.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 
6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.PoolerEndLogits.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.PoolerEndLogits.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1932" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_states<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">p_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerEndLogits.forward.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_utils.PoolerEndLogits.forward.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; The final hidden states of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerEndLogits.forward.start_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerEndLogits.forward.start_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>, <em>optional</em>) &#x2014; The hidden states of the first tokens for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerEndLogits.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerEndLogits.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; The position of the first token for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerEndLogits.forward.p_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerEndLogits.forward.p_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 
1.0 means token should be masked.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.PoolerEndLogits.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The end logits for SQuAD.</p> <!-- HTML_TAG_END --></p></div></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>One of <code>start_states</code> or <code>start_positions</code> should be not <code>None</code>. If both are set, <code>start_positions</code> overrides <code>start_states</code>.</p></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.PoolerAnswerClass"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 
.27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">PoolerAnswerClass</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.PoolerAnswerClass" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.PoolerAnswerClass"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1984" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p 
class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerAnswerClass.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerAnswerClass.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Compute SQuAD 2.0 answer class from classification and start tokens hidden states.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.PoolerAnswerClass.forward"><!-- HTML_TAG_START --><h4 
class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.PoolerAnswerClass.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.PoolerAnswerClass.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 
0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L1999" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_states<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_index<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 
ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerAnswerClass.forward.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerAnswerClass.forward.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; The final hidden states of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerAnswerClass.forward.start_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerAnswerClass.forward.start_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>, <em>optional</em>) &#x2014; The hidden states of the first tokens for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerAnswerClass.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerAnswerClass.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; The position of the first token for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.PoolerAnswerClass.forward.cls_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.PoolerAnswerClass.forward.cls_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Position of the CLS token for each sentence in the batch. 
If <code>None</code>, takes the last token.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.PoolerAnswerClass.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The SQuAD 2.0 answer class.</p> <!-- HTML_TAG_END --></p></div></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>One of <code>start_states</code> or <code>start_positions</code> should be not <code>None</code>. If both are set, <code>start_positions</code> overrides <code>start_states</code>.</p></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.SquadHeadOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 
1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">SquadHeadOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.SquadHeadOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.SquadHeadOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2050" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">loss<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_top_log_probs<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_top_index<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_top_log_probs<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_top_index<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_logits<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.loss" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SquadHeadOutput.loss"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) &#x2014; Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.start_top_log_probs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SquadHeadOutput.start_top_log_probs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top config.start_n_top start token possibilities (beam-search).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.start_top_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SquadHeadOutput.start_top_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top config.start_n_top start token possibilities 
(beam-search).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.end_top_log_probs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SquadHeadOutput.end_top_log_probs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.end_top_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_utils.SquadHeadOutput.end_top_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SquadHeadOutput.cls_logits" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SquadHeadOutput.cls_logits"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 
11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) &#x2014; Log probabilities for the <code>is_impossible</code> label of the answers.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of question answering models using a <a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.modeling_utils.SQuADHead">SQuADHead</a>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.SQuADHead"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 
.27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">SQuADHead</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.SQuADHead" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.SQuADHead"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2080" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center 
font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model, will be used to grab the <code>hidden_size</code> of the model and the <code>layer_norm_eps</code> to use.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A SQuAD head inspired by XLNet.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.SQuADHead.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all 
bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.SQuADHead.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.SQuADHead.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2099" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end_positions<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_index<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_impossible<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">p_mask<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_dict<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.modeling_utils.SquadHeadOutput" >transformers.modeling_utils.SquadHeadOutput</a> or <code>tuple(torch.FloatTensor)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len, hidden_size)</code>) &#x2014; Final hidden states of the model on the sequence tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.start_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.start_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Positions of the first token for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.end_positions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_utils.SQuADHead.forward.end_positions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end_positions</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Positions of the last token for the labeled span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.cls_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.cls_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Position of the CLS token for each sentence in the batch. If <code>None</code>, takes the last token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.is_impossible" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.is_impossible"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_impossible</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>) &#x2014; Whether the question has a possible answer in the paragraph or not.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.p_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.p_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>p_mask</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, seq_len)</code>, <em>optional</em>) &#x2014; Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 
1.0 means token should be masked.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SQuADHead.forward.return_dict" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SQuADHead.forward.return_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return a <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a> instead of a plain tuple.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.SQuADHead.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.modeling_utils.SquadHeadOutput" >transformers.modeling_utils.SquadHeadOutput</a> or 
<code>tuple(torch.FloatTensor)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.modeling_utils.SquadHeadOutput" >transformers.modeling_utils.SquadHeadOutput</a> or a tuple of <code>torch.FloatTensor</code> (if <code>return_dict=False</code> is passed or when <code>config.return_dict=False</code>) comprising various elements depending on the configuration (<code>&lt;class 'transformers.configuration_utils.PretrainedConfig'&gt;</code>) and inputs.</p> <ul> <li><strong>loss</strong> (<code>torch.FloatTensor</code> of shape <code>(1,)</code>, <em>optional</em>, returned if both <code>start_positions</code> and <code>end_positions</code> are provided) — Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.</li> <li><strong>start_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) — Log probabilities for the top config.start_n_top start token possibilities (beam-search).</li> <li><strong>start_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) — Indices for the top config.start_n_top start token possibilities (beam-search).</li> <li><strong>end_top_log_probs</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) — Log probabilities for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities 
(beam-search).</li> <li><strong>end_top_index</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, config.start_n_top * config.end_n_top)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) — Indices for the top <code>config.start_n_top * config.end_n_top</code> end token possibilities (beam-search).</li> <li><strong>cls_logits</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size,)</code>, <em>optional</em>, returned if <code>start_positions</code> or <code>end_positions</code> is not provided) — Log probabilities for the <code>is_impossible</code> label of the answers.</li> </ul> <!-- HTML_TAG_END --></p></div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.SequenceSummary"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> 
<span class="font-medium">transformers.modeling_utils.</span><span class="font-semibold">SequenceSummary</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.SequenceSummary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.SequenceSummary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2197" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config<span class="opacity-60">: PretrainedConfig</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.modeling_utils.SequenceSummary.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SequenceSummary.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses):</p> <ul> <li> <p><strong>summary_type</strong> (<code>str</code>) &#x2014; The method to use to make this summary. 
Accepted values are:</p> <ul> <li><code>&quot;last&quot;</code> &#x2014; Take the last token hidden state (like XLNet)</li> <li><code>&quot;first&quot;</code> &#x2014; Take the first token hidden state (like Bert)</li> <li><code>&quot;mean&quot;</code> &#x2014; Take the mean of all tokens hidden states</li> <li><code>&quot;cls_index&quot;</code> &#x2014; Supply a Tensor of classification token position (GPT/GPT-2)</li> <li><code>&quot;attn&quot;</code> &#x2014; Not implemented now, use multi-head attention</li> </ul> </li> <li> <p><strong>summary_use_proj</strong> (<code>bool</code>) &#x2014; Add a projection after the vector extraction.</p> </li> <li> <p><strong>summary_proj_to_labels</strong> (<code>bool</code>) &#x2014; If <code>True</code>, the projection outputs to <code>config.num_labels</code> classes (otherwise to <code>config.hidden_size</code>).</p> </li> <li> <p><strong>summary_activation</strong> (<code>Optional[str]</code>) &#x2014; Set to <code>&quot;tanh&quot;</code> to add a tanh activation to the output, another string or <code>None</code> will add no activation.</p> </li> <li> <p><strong>summary_first_dropout</strong> (<code>float</code>) &#x2014; Optional dropout probability before the projection and activation.</p> </li> <li> <p><strong>summary_last_dropout</strong> (<code>float</code>)&#x2014; Optional dropout probability after the projection and activation.</p> </li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Compute a single vector summary of a sequence hidden states.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.SequenceSummary.forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" 
viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.SequenceSummary.forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.SequenceSummary.forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2252" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cls_index<span class="opacity-60">: typing.Optional[torch.LongTensor] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SequenceSummary.forward.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SequenceSummary.forward.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>torch.FloatTensor</code> of shape <code>[batch_size, seq_len, hidden_size]</code>) &#x2014; The hidden states of the last layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.SequenceSummary.forward.cls_index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.SequenceSummary.forward.cls_index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>cls_index</strong> (<code>torch.LongTensor</code> of shape <code>[batch_size]</code> or <code>[batch_size, ...]</code> where &#x2026; are optional leading dimensions of <code>hidden_states</code>, <em>optional</em>) &#x2014; Used if <code>summary_type == &quot;cls_index&quot;</code> and takes the last token of the sequence as classification token.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.SequenceSummary.forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The summary of the sequence hidden states.</p> <!-- HTML_TAG_END --></p></div></div> <p>Compute a single vector summary of a sequence hidden states.</p></div></div> <h2 class="relative group"><a id="transformers.apply_chunking_to_forward" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.apply_chunking_to_forward"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>PyTorch Helper Functions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.apply_chunking_to_forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.apply_chunking_to_forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.apply_chunking_to_forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.apply_chunking_to_forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2401" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">forward_fn<span class="opacity-60">: typing.Callable[..., torch.Tensor]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">chunk_size<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">chunk_dim<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*input_tensors<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.Tensor</code></span><!-- HTML_TAG_END --></span></p> 
<div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.apply_chunking_to_forward.forward_fn" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.apply_chunking_to_forward.forward_fn"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>forward_fn</strong> (<code>Callable[..., torch.Tensor]</code>) &#x2014; The forward function of the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.apply_chunking_to_forward.chunk_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.apply_chunking_to_forward.chunk_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>chunk_size</strong> (<code>int</code>) &#x2014; The chunk size of a chunked tensor: <code>num_chunks = len(input_tensors[0]) / chunk_size</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.apply_chunking_to_forward.chunk_dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.apply_chunking_to_forward.chunk_dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>chunk_dim</strong> (<code>int</code>) &#x2014; The dimension over which the <code>input_tensors</code> should be chunked.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.apply_chunking_to_forward.input_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.apply_chunking_to_forward.input_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_tensors</strong> (<code>Tuple[torch.Tensor]</code>) &#x2014; The input tensors of <code>forward_fn</code> which will be chunked<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.apply_chunking_to_forward.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.Tensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A tensor with the same shape as the <code>forward_fn</code> would have given if applied`.</p> <!-- HTML_TAG_END --></p></div></div> <p>This function chunks the <code>input_tensors</code> into smaller input tensor parts of size <code>chunk_size</code> over the dimension <code>chunk_dim</code>. It then applies a layer <code>forward_fn</code> to each chunk independently to save memory.</p> <p>If the <code>forward_fn</code> is independent across the <code>chunk_dim</code> this function will yield the same result as directly applying <code>forward_fn</code> to <code>input_tensors</code>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># rename the usual forward() fn to forward_chunk()</span> <span 
class="hljs-keyword">def</span> <span class="hljs-title function_">forward_chunk</span>(<span class="hljs-params">self, hidden_states</span>): hidden_states = self.decoder(hidden_states) <span class="hljs-keyword">return</span> hidden_states <span class="hljs-comment"># implement a chunked forward function</span> <span class="hljs-keyword">def</span> <span class="hljs-title function_">forward</span>(<span class="hljs-params">self, hidden_states</span>): <span class="hljs-keyword">return</span> apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.find_pruneable_heads_and_indices"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 
14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.modeling_utils.find_pruneable_heads_and_indices</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.find_pruneable_heads_and_indices" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.find_pruneable_heads_and_indices"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L95" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">heads<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">n_heads<span 
class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">head_size<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_pruned_heads<span class="opacity-60">: typing.Set[int]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Tuple[Set[int], torch.LongTensor]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.find_pruneable_heads_and_indices.heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.find_pruneable_heads_and_indices.heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>heads</strong> (<code>List[int]</code>) &#x2014; List of the indices of heads to prune.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.find_pruneable_heads_and_indices.n_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.find_pruneable_heads_and_indices.n_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>n_heads</strong> (<code>int</code>) &#x2014; The number of heads in the model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.find_pruneable_heads_and_indices.head_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.modeling_utils.find_pruneable_heads_and_indices.head_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>head_size</strong> (<code>int</code>) &#x2014; The size of each head.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.find_pruneable_heads_and_indices.already_pruned_heads" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.find_pruneable_heads_and_indices.already_pruned_heads"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_pruned_heads</strong> (<code>Set[int]</code>) &#x2014; A set of already pruned heads.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.find_pruneable_heads_and_indices.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Tuple[Set[int], torch.LongTensor]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A tuple with the remaining heads and their corresponding indices.</p> <!-- HTML_TAG_END --></p></div></div> <p>Finds the heads and their indices taking <code>already_pruned_heads</code> into account.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.prune_layer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 
12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.prune_layer</span></h4><!-- HTML_TAG_END --> <a id="transformers.prune_layer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.prune_layer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2377" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">layer<span class="opacity-60">: typing.Union[torch.nn.modules.linear.Linear, transformers.modeling_utils.Conv1D]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">index<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dim<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.nn.Linear</code> or <a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.Conv1D" >Conv1D</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.prune_layer.layer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.prune_layer.layer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer</strong> (<code>Union[torch.nn.Linear, Conv1D]</code>) &#x2014; The layer to prune.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.prune_layer.index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.prune_layer.index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>index</strong> (<code>torch.LongTensor</code>) &#x2014; The indices to keep in the layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.prune_layer.dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.prune_layer.dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dim</strong> (<code>int</code>, <em>optional</em>) &#x2014; The dimension on which to keep the indices.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.prune_layer.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.nn.Linear</code> or <a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.Conv1D" >Conv1D</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The pruned layer as a new layer with <code>requires_grad=True</code>.</p> <!-- HTML_TAG_END --></p></div></div> <p>Prune a Conv1D or linear layer to keep only entries in index.</p> <p>Used to remove heads.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.prune_conv1d_layer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r 
from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.modeling_utils.prune_conv1d_layer</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.prune_conv1d_layer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.prune_conv1d_layer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2344" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer<span class="opacity-60">: Conv1D</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">index<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">dim<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.Conv1D" >Conv1D</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.prune_conv1d_layer.layer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.prune_conv1d_layer.layer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer</strong> (<a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.Conv1D">Conv1D</a>) &#x2014; The layer to prune.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.prune_conv1d_layer.index" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.prune_conv1d_layer.index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>index</strong> (<code>torch.LongTensor</code>) &#x2014; The indices to keep in the layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.prune_conv1d_layer.dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.prune_conv1d_layer.dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dim</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The dimension on which to keep the indices.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.prune_conv1d_layer.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> 
<p><a href="/docs/transformers/pr_16143/en/internal/modeling_utils#transformers.Conv1D" >Conv1D</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The pruned layer as a new layer with <code>requires_grad=True</code>.</p> <!-- HTML_TAG_END --></p></div></div> <p>Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed.</p> <p>Used to remove heads.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_utils.prune_linear_layer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.modeling_utils.prune_linear_layer</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_utils.prune_linear_layer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_utils.prune_linear_layer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_utils.py#L2310" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">layer<span class="opacity-60">: Linear</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">index<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">dim<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.nn.Linear</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.prune_linear_layer.layer" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.prune_linear_layer.layer"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>layer</strong> (<code>torch.nn.Linear</code>) &#x2014; The layer to prune.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.prune_linear_layer.index" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.prune_linear_layer.index"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>index</strong> (<code>torch.LongTensor</code>) &#x2014; The indices to keep in the layer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_utils.prune_linear_layer.dim" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_utils.prune_linear_layer.dim"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>dim</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; The dimension on which to keep the indices.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_utils.prune_linear_layer.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.nn.Linear</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The pruned layer as a new layer with <code>requires_grad=True</code>.</p> <!-- HTML_TAG_END --></p></div></div> <p>Prune a linear layer to keep only entries in index.</p> <p>Used to remove heads.</p></div> <h2 class="relative group"><a id="transformers.modeling_tf_utils.TFConv1D" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.TFConv1D"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TensorFlow custom layers </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFConv1D"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_utils.</span><span class="font-semibold">TFConv1D</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFConv1D" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFConv1D"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1819" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_utils.TFConv1D.nf" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.TFConv1D.nf"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>nf</strong> (<code>int</code>) &#x2014; The number of output features.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_utils.TFConv1D.nx" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.TFConv1D.nx"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>nx</strong> (<code>int</code>) &#x2014; The number of input features.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_utils.TFConv1D.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.TFConv1D.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>, defaults to 0.02) &#x2014; The standard deviation to use to initialize the weights. kwargs &#x2014; Additional keyword arguments passed along to the <code>__init__</code> of <code>tf.keras.layers.Layer</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>1D-convolutional layer as defined by Radford et al. 
for OpenAI GPT (and also used in GPT-2).</p> <p>Basically works like a linear layer but the weights are transposed.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFSharedEmbeddings"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFSharedEmbeddings</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFSharedEmbeddings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFSharedEmbeddings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1859" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSharedEmbeddings.vocab_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSharedEmbeddings.vocab_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>vocab_size</strong> (<code>int</code>) &#x2014; The size of the vocabulary, e.g., the number of unique tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSharedEmbeddings.hidden_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSharedEmbeddings.hidden_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_size</strong> (<code>int</code>) &#x2014; The size of 
the embedding vectors.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSharedEmbeddings.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSharedEmbeddings.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, <em>optional</em>) &#x2014; The standard deviation to use when initializing the weights. 
If no value is provided, it will default to {@html &quot;<span class="\&quot;katex\&quot;"><span class="\&quot;katex-mathml\&quot;"><math xmlns="\&quot;http://www.w3.org/1998/Math/MathML\&quot;"><semantics><mrow><mn>1</mn><mi mathvariant="\&quot;normal\&quot;">/</mi><msqrt><mrow><mi>h</mi><mi>i</mi><mi>d</mi><mi>d</mi><mi>e</mi><mi>n</mi><mi mathvariant="\&quot;normal\&quot;">_</mi><mi>s</mi><mi>i</mi><mi>z</mi><mi>e</mi></mrow></msqrt></mrow><annotation encoding="\&quot;application/x-tex\&quot;">1/\\sqrt{hidden\\_size}</annotation></semantics></math></span><span class="\&quot;katex-html\&quot;" aria-hidden="\&quot;true\&quot;"><span class="\&quot;base\&quot;"><span class="\&quot;strut\&quot;" style="\&quot;height:1.24em;vertical-align:-0.3628em;\&quot;"></span><span class="\&quot;mord\&quot;">1/</span><span class="\&quot;mord" sqrt\"><span class="\&quot;vlist-t" vlist-t2\"><span class="\&quot;vlist-r\&quot;"><span class="\&quot;vlist\&quot;" style="\&quot;height:0.8772em;\&quot;"><span class="\&quot;svg-align\&quot;" style="\&quot;top:-3.2em;\&quot;"><span class="\&quot;pstrut\&quot;" style="\&quot;height:3.2em;\&quot;"></span><span class="\&quot;mord\&quot;" style="\&quot;padding-left:1em;\&quot;"><span class="\&quot;mord" mathnormal\">hi</span><span class="\&quot;mord" mathnormal\">dd</span><span class="\&quot;mord" mathnormal\">e</span><span class="\&quot;mord" mathnormal\">n</span><span class="\&quot;mord\&quot;" style="\&quot;margin-right:0.02778em;\&quot;">_</span><span class="\&quot;mord" mathnormal\">s</span><span class="\&quot;mord" mathnormal\">i</span><span class="\&quot;mord" mathnormal\">ze</span></span></span><span style="\&quot;top:-2.8372em;\&quot;"><span class="\&quot;pstrut\&quot;" style="\&quot;height:3.2em;\&quot;"></span><span class="\&quot;hide-tail\&quot;" style="\&quot;min-width:1.02em;height:1.28em;\&quot;"><svg xmlns="\&quot;http://www.w3.org/2000/svg\&quot;" width="400em" height="1.28em" viewBox="0 0 400000 1296" 
preserveAspectRatio="xMinYMin slice"><path d="M263,681c0.7,0,18,39.7,52,119\nc34,79.3,68.167,158.7,102.5,238c34.3,79.3,51.8,119.3,52.5,120\nc340,-704.7,510.7,-1060.3,512,-1067\nl0 -0\nc4.7,-7.3,11,-11,19,-11\nH40000v40H1012.3\ns-271.3,567,-271.3,567c-38.7,80.7,-84,175,-136,283c-52,108,-89.167,185.3,-111.5,232\nc-22.3,46.7,-33.8,70.3,-34.5,71c-4.7,4.7,-12.3,7,-23,7s-12,-1,-12,-1\ns-109,-253,-109,-253c-72.7,-168,-109.3,-252,-110,-252c-10.7,8,-22,16.7,-34,26\nc-22,17.3,-33.3,26,-34,26s-26,-26,-26,-26s76,-59,76,-59s76,-60,76,-60z\nM1001 80h400000v40h-400000z"/></svg></span></span></span><span class="\&quot;vlist-s\&quot;">&#x200B;</span></span><span class="\&quot;vlist-r\&quot;"><span class="\&quot;vlist\&quot;" style="\&quot;height:0.3628em;\&quot;"><span></span></span></span></span></span></span></span></span>&quot;}. kwargs &#x2014; Additional keyword arguments passed along to the <code>__init__</code> of <code>tf.keras.layers.Layer</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Construct shared token embeddings.</p> <p>The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language modeling.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFSharedEmbeddings.call"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" 
fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>call</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFSharedEmbeddings.call" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFSharedEmbeddings.call"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1904" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p 
class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">inputs<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mode<span class="opacity-60">: str = &#39;embedding&#39;</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Tensor</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSharedEmbeddings.call.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSharedEmbeddings.call.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (<code>tf.Tensor</code>) &#x2014; In embedding mode, should be an int64 tensor with shape <code>[batch_size, length]</code>.</p> <p>In linear mode, should be a float tensor with shape <code>[batch_size, length, hidden_size]</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSharedEmbeddings.call.mode" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSharedEmbeddings.call.mode"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mode</strong> (<code>str</code>, defaults to <code>&quot;embedding&quot;</code>) &#x2014; A valid value is either <code>&quot;embedding&quot;</code> or <code>&quot;linear&quot;</code>, the first one indicates that the layer should be used as an embedding layer, the second one that the layer should be used as a linear decoder.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 
text-gray-800" id="transformers.TFSharedEmbeddings.call.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Tensor</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>In embedding mode, the output is a float32 embedding tensor, with shape <code>[batch_size, length, embedding_size]</code>.</p> <p>In linear mode, the output is a float32 with shape <code>[batch_size, length, vocab_size]</code>.</p> <!-- HTML_TAG_END --></p></div></div> <p>Get token embeddings of inputs or decode final hidden state.</p> <p>Shared weights logic is adapted from <a href="https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24" rel="nofollow">here</a>.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFSequenceSummary"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 
3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFSequenceSummary</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFSequenceSummary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFSequenceSummary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L1957" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*args<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details 
"> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSequenceSummary.config" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSequenceSummary.config"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>config</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/configuration#transformers.PretrainedConfig">PretrainedConfig</a>) &#x2014; The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses):</p> <ul> <li> <p><strong>summary_type</strong> (<code>str</code>) &#x2014; The method to use to make this summary. 
Accepted values are:</p> <ul> <li><code>&quot;last&quot;</code> &#x2014; Take the last token hidden state (like XLNet)</li> <li><code>&quot;first&quot;</code> &#x2014; Take the first token hidden state (like Bert)</li> <li><code>&quot;mean&quot;</code> &#x2014; Take the mean of all tokens hidden states</li> <li><code>&quot;cls_index&quot;</code> &#x2014; Supply a Tensor of classification token position (GPT/GPT-2)</li> <li><code>&quot;attn&quot;</code> &#x2014; Not implemented now, use multi-head attention</li> </ul> </li> <li> <p><strong>summary_use_proj</strong> (<code>bool</code>) &#x2014; Add a projection after the vector extraction.</p> </li> <li> <p><strong>summary_proj_to_labels</strong> (<code>bool</code>) &#x2014; If <code>True</code>, the projection outputs to <code>config.num_labels</code> classes (otherwise to <code>config.hidden_size</code>).</p> </li> <li> <p><strong>summary_activation</strong> (<code>Optional[str]</code>) &#x2014; Set to <code>&quot;tanh&quot;</code> to add a tanh activation to the output, another string or <code>None</code> will add no activation.</p> </li> <li> <p><strong>summary_first_dropout</strong> (<code>float</code>) &#x2014; Optional dropout probability before the projection and activation.</p> </li> <li> <p><strong>summary_last_dropout</strong> (<code>float</code>)&#x2014; Optional dropout probability after the projection and activation.</p> </li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSequenceSummary.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSequenceSummary.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<code>float</code>, defaults to 0.02) &#x2014; The standard deviation to use to initialize the weights. kwargs &#x2014; Additional keyword arguments passed along to the <code>__init__</code> of <code>tf.keras.layers.Layer</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Compute a single vector summary of a sequence hidden states.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFSequenceSummary.register_for_auto_class"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 
12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>register_for_auto_class</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFSequenceSummary.register_for_auto_class" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFSequenceSummary.register_for_auto_class"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L2072" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_class<span class="opacity-60"> = &#39;TFAutoModel&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFSequenceSummary.register_for_auto_class.auto_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFSequenceSummary.register_for_auto_class.auto_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;TFAutoModel&quot;</code>) &#x2014; The auto class to register this new model with.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Register this class with a given auto class. 
This should only be used for custom models as the ones in the library are already mapped with an auto class.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This API is experimental and may have some slight breaking changes in the next releases.</p></div></div></div> <h2 class="relative group"><a id="transformers.modeling_tf_utils.TFCausalLanguageModelingLoss" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.TFCausalLanguageModelingLoss"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TensorFlow loss functions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFCausalLanguageModelingLoss"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white 
dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_utils.</span><span class="font-semibold">TFCausalLanguageModelingLoss</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFCausalLanguageModelingLoss" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFCausalLanguageModelingLoss"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L170" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" 
opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_utils.</span><span class="font-semibold">TFMaskedLanguageModelingLoss</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L261" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens.</p> <div class="course-tip 
bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFMultipleChoiceLoss"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_utils.</span><span class="font-semibold">TFMultipleChoiceLoss</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFMultipleChoiceLoss" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFMultipleChoiceLoss"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L251" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Loss function suitable for multiple choice tasks.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFQuestionAnsweringLoss"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 
7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_utils.</span><span class="font-semibold">TFQuestionAnsweringLoss</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFQuestionAnsweringLoss" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFQuestionAnsweringLoss"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L192" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Loss function suitable for question answering.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFSequenceClassificationLoss"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_utils.</span><span class="font-semibold">TFSequenceClassificationLoss</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFSequenceClassificationLoss" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFSequenceClassificationLoss"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L235" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Loss function suitable for sequence classification.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.TFTokenClassificationLoss"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" 
fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.modeling_tf_utils.</span><span class="font-semibold">TFTokenClassificationLoss</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.TFTokenClassificationLoss" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.TFTokenClassificationLoss"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L207" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> 
</div></div> <p>Loss function suitable for token classification.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.</p></div></div> <h2 class="relative group"><a id="transformers.modeling_tf_utils.get_initializer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.get_initializer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TensorFlow Helper Functions </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.get_initializer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 
0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.modeling_tf_utils.get_initializer</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.get_initializer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.get_initializer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 
11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L2099" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initializer_range<span class="opacity-60">: float = 0.02</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.initializers.TruncatedNormal</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_utils.get_initializer.initializer_range" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.get_initializer.initializer_range"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initializer_range</strong> (<em>float</em>, defaults to 0.02) &#x2014; Standard deviation of the initializer range.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.modeling_tf_utils.get_initializer.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.initializers.TruncatedNormal</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The truncated normal initializer.</p> <!-- HTML_TAG_END --></p></div></div> <p>Creates a <code>tf.initializers.TruncatedNormal</code> with the given range.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.modeling_tf_utils.keras_serializable"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path 
fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.modeling_tf_utils.keras_serializable</span></h4><!-- HTML_TAG_END --> <a id="transformers.modeling_tf_utils.keras_serializable" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.modeling_tf_utils.keras_serializable"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/modeling_tf_utils.py#L105" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.modeling_tf_utils.keras_serializable.cls" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.modeling_tf_utils.keras_serializable.cls"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls</strong> (a <code>tf.keras.layers.Layers subclass</code>) &#x2014; Typically a <code>TF.MainLayer</code> class in this project, in general must accept a <code>config</code> argument to its initializer.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Decorate a Keras Layer class to support Keras serialization.</p> <p>This is done by:</p> <ol><li>Adding a <code>transformers_config</code> dict to the Keras config dictionary in 
<code>get_config</code> (called by Keras at serialization time.</li> <li>Wrapping <code>__init__</code> to accept that <code>transformers_config</code> dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer.</li> <li>Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in <code>custom_objects</code> in the call to <code>tf.keras.models.load_model</code>.</li></ol></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.shape_list"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.shape_list</span></h4><!-- HTML_TAG_END --> <a id="transformers.shape_list" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.shape_list"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tf_utils.py#L31" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tensor<span class="opacity-60">: typing.Union[tensorflow.python.framework.ops.Tensor, numpy.ndarray]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 
text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.shape_list.tensor" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.shape_list.tensor"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tensor</strong> (<code>tf.Tensor</code> or <code>np.ndarray</code>) &#x2014; The tensor we want the shape of.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.shape_list.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The shape of the tensor as a list.</p> <!-- HTML_TAG_END --></p></div></div> <p>Deal with dynamic shape in tensorflow cleanly.</p></div> <script type="module" data-hydrate="1vmno5w"> import { 
start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1vmno5w"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/internal/modeling_utils.mdx-1b04e493.js") ], params: {} } }); </script>
451
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/internal/file_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;general-utilities&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.file_utils.ExplicitEnum&quot;,&quot;title&quot;:&quot;Enums and namedtuples&quot;},{&quot;local&quot;:&quot;transformers.add_start_docstrings&quot;,&quot;title&quot;:&quot;Special Decorators&quot;},{&quot;local&quot;:&quot;transformers.file_utils.cached_property&quot;,&quot;title&quot;:&quot;Special Properties&quot;},{&quot;local&quot;:&quot;transformers._LazyModule&quot;,&quot;title&quot;:&quot;Other Utilities&quot;}],&quot;title&quot;:&quot;General Utilities&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/internal/file_utils.mdx-23310578.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <h1 class="relative group"><a id="general-utilities" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#general-utilities"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>General Utilities </span></h1> <p>This page lists all of Transformers general utility functions that are found in the file <code>file_utils.py</code>.</p> <p>Most of those are only useful if you are studying the general code in the library.</p> <h2 class="relative group"><a id="transformers.file_utils.ExplicitEnum" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.ExplicitEnum"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Enums and namedtuples </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center 
text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.ExplicitEnum"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.file_utils.</span><span class="font-semibold">ExplicitEnum</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.file_utils.ExplicitEnum" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.ExplicitEnum"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2710" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">names<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qualname<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Enum with more explicit error message for missing values.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.PaddingStrategy"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.file_utils.</span><span class="font-semibold">PaddingStrategy</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.file_utils.PaddingStrategy" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.PaddingStrategy"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2722" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">names<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qualname<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Possible values for the <code>padding</code> argument in <a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizerBase.<strong>call</strong>()</a>. Useful for tab-completion in an IDE.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TensorType"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TensorType</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TensorType" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TensorType"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2733" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">names<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qualname<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60"> = 1</span></span> </span> 
<span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Possible values for the <code>return_tensors</code> argument in <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizerBase.<strong>call</strong>()</a>. Useful for tab-completion in an IDE.</p></div> <h2 class="relative group"><a id="transformers.add_start_docstrings" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.add_start_docstrings"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Special Decorators </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.add_start_docstrings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 
18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.add_start_docstrings</span></h4><!-- HTML_TAG_END --> <a id="transformers.add_start_docstrings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.add_start_docstrings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L863" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*docstr<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.add_start_docstrings_to_model_forward"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 
13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.file_utils.add_start_docstrings_to_model_forward</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.add_start_docstrings_to_model_forward" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.add_start_docstrings_to_model_forward"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L871" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*docstr<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.add_end_docstrings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.add_end_docstrings</span></h4><!-- HTML_TAG_END --> <a id="transformers.add_end_docstrings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.add_end_docstrings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L893" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*docstr<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.add_code_sample_docstrings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 
13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.file_utils.add_code_sample_docstrings</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.add_code_sample_docstrings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.add_code_sample_docstrings"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L1673" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*docstr<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">processor_class<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">checkpoint<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config_class<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">mask<span class="opacity-60"> = &#39;[MASK]&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model_cls<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">modality<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">expected_output<span class="opacity-60"> = &#39;&#39;</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">expected_loss<span class="opacity-60"> = 
&#39;&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.replace_return_docstrings"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.file_utils.replace_return_docstrings</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.replace_return_docstrings" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.replace_return_docstrings"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L1748" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">config_class<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <h2 class="relative group"><a id="transformers.file_utils.cached_property" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.cached_property"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Special Properties </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.cached_property"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span 
class="font-medium">transformers.file_utils.</span><span class="font-semibold">cached_property</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.file_utils.cached_property" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.cached_property"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2466" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fget<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fset<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">fdel<span class="opacity-60"> = None</span></span> 
</span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">doc<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Descriptor that mimics @property but caches output in member variable.</p> <p>From tensorflow_datasets</p> <p>Built-in in functools from Python 3.8.</p></div> <h2 class="relative group"><a id="transformers._LazyModule" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers._LazyModule"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Other Utilities </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers._LazyModule"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">_LazyModule</span></span></h3><!-- HTML_TAG_END --> <a id="transformers._LazyModule" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers._LazyModule"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2745" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">name<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module_file<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">import_structure<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module_spec<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">extra_objects<span class="opacity-60"> = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Module class that surfaces all objects but only performs associated imports when the objects are requested.</p></div> <script type="module" data-hydrate="1snufn9"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1snufn9"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/internal/file_utils.mdx-23310578.js") ], params: {} } }); </script>
452
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/internal/pipelines_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;utilities-for-pipelines&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.pipelines.ArgumentHandler&quot;,&quot;title&quot;:&quot;Argument handling&quot;},{&quot;local&quot;:&quot;transformers.PipelineDataFormat&quot;,&quot;title&quot;:&quot;Data format&quot;},{&quot;local&quot;:&quot;transformers.pipelines.PipelineException&quot;,&quot;title&quot;:&quot;Utilities&quot;}],&quot;title&quot;:&quot;Utilities for pipelines&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/internal/pipelines_utils.mdx-885320a3.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <h1 class="relative group"><a id="utilities-for-pipelines" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#utilities-for-pipelines"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities for pipelines </span></h1> <p>This page lists all the utility functions the library provides for pipelines.</p> <p>Most of those are only useful if you are studying the code of the models in the library.</p> <h2 class="relative group"><a id="transformers.pipelines.ArgumentHandler" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipelines.ArgumentHandler"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Argument handling </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.pipelines.ArgumentHandler"><!-- 
HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.pipelines.</span><span class="font-semibold">ArgumentHandler</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.pipelines.ArgumentHandler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pipelines.ArgumentHandler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L394" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Base interface for handling arguments for each <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Pipeline">Pipeline</a>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.pipelines.ZeroShotClassificationArgumentHandler"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.pipelines.</span><span class="font-semibold">ZeroShotClassificationArgumentHandler</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.pipelines.ZeroShotClassificationArgumentHandler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pipelines.ZeroShotClassificationArgumentHandler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/zero_shot_classification.py#L14" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Handles arguments for zero-shot for text classification by turning each possible label into an NLI premise/hypothesis pair.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.pipelines.QuestionAnsweringArgumentHandler"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.pipelines.</span><span class="font-semibold">QuestionAnsweringArgumentHandler</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.pipelines.QuestionAnsweringArgumentHandler" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pipelines.QuestionAnsweringArgumentHandler"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/question_answering.py#L32" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question &amp; context) to be mapped to internal <code>SquadExample</code></p> <p>QuestionAnsweringArgumentHandler manages all the possible to create a <code>SquadExample</code>from the command-line supplied arguments.</p></div> <h2 class="relative group"><a id="transformers.PipelineDataFormat" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Data format </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PipelineDataFormat"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PipelineDataFormat</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PipelineDataFormat" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PipelineDataFormat"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L404" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">column<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.PipelineDataFormat.output_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat.output_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipelineDataFormat.input_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat.input_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipelineDataFormat.column" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat.column"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipelineDataFormat.overwrite" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat.overwrite"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for all the pipeline supported data format both for reading and writing. 
Supported data formats currently includes:</p> <ul><li>JSON</li> <li>CSV</li> <li>stdin/stdout (pipe)</li></ul> <p><code>PipelineDataFormat</code> also includes some utilities to work with multi-columns like mapping from datasets columns to pipelines keyword arguments through the <code>dataset_kwarg_1=dataset_column_1</code> format.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PipelineDataFormat.from_str"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_str</span></h4><!-- HTML_TAG_END --> <a id="transformers.PipelineDataFormat.from_str" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.PipelineDataFormat.from_str"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L481" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">format<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">column<span 
class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite<span class="opacity-60"> = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.PipelineDataFormat" >PipelineDataFormat</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipelineDataFormat.from_str.output_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat.from_str.output_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_path</strong> 
(<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipelineDataFormat.from_str.input_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat.from_str.input_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipelineDataFormat.from_str.column" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat.from_str.column"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipelineDataFormat.from_str.overwrite" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat.from_str.overwrite"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite</strong> (<code>bool</code>, 
<em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PipelineDataFormat.from_str.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.PipelineDataFormat" >PipelineDataFormat</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The proper data format.</p> <!-- HTML_TAG_END --></p></div></div> <p>Creates an instance of the right subclass of <a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.PipelineDataFormat">PipelineDataFormat</a> depending on <code>format</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PipelineDataFormat.save"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 
11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save</span></h4><!-- HTML_TAG_END --> <a id="transformers.PipelineDataFormat.save" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PipelineDataFormat.save"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L453" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60">: typing.Union[dict, 
typing.List[dict]]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipelineDataFormat.save.data" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat.save.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>dict</code> or list of <code>dict</code>) &#x2014; The data to store.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save the provided data object with the representation for the current <a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.PipelineDataFormat">PipelineDataFormat</a>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.PipelineDataFormat.save_binary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_binary</span></h4><!-- HTML_TAG_END --> <a id="transformers.PipelineDataFormat.save_binary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PipelineDataFormat.save_binary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L463" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60">: typing.Union[dict, typing.List[dict]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipelineDataFormat.save_binary.data" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipelineDataFormat.save_binary.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>dict</code> or list of <code>dict</code>) &#x2014; The data to store.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PipelineDataFormat.save_binary.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Path where the data has been saved.</p> <!-- HTML_TAG_END --></p></div></div> <p>Save the provided data object as a pickle-formatted binary data on the disk.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.CsvPipelineDataFormat"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path 
class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">CsvPipelineDataFormat</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.CsvPipelineDataFormat" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.CsvPipelineDataFormat"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L517" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">column<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite<span class="opacity-60"> = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.CsvPipelineDataFormat.output_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.CsvPipelineDataFormat.output_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.CsvPipelineDataFormat.input_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.CsvPipelineDataFormat.input_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.CsvPipelineDataFormat.column" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.CsvPipelineDataFormat.column"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.CsvPipelineDataFormat.overwrite" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.CsvPipelineDataFormat.overwrite"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Support for pipelines using CSV data format.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.CsvPipelineDataFormat.save"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>save</span></h4><!-- HTML_TAG_END --> <a id="transformers.CsvPipelineDataFormat.save" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.CsvPipelineDataFormat.save"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L547" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60">: typing.List[dict]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.CsvPipelineDataFormat.save.data" class="header-link block pr-0.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.CsvPipelineDataFormat.save.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>List[dict]</code>) &#x2014; The data to store.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save the provided data object with the representation for the current <a href="/docs/transformers/pr_16143/en/internal/pipelines_utils#transformers.PipelineDataFormat">PipelineDataFormat</a>.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.JsonPipelineDataFormat"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">JsonPipelineDataFormat</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.JsonPipelineDataFormat" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.JsonPipelineDataFormat"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L561" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs 
md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">column<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite<span class="opacity-60"> = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.JsonPipelineDataFormat.output_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.JsonPipelineDataFormat.output_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 
1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.JsonPipelineDataFormat.input_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.JsonPipelineDataFormat.input_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.JsonPipelineDataFormat.column" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.JsonPipelineDataFormat.column"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.JsonPipelineDataFormat.overwrite" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.JsonPipelineDataFormat.overwrite"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Support for pipelines using JSON file format.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.JsonPipelineDataFormat.save"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save</span></h4><!-- HTML_TAG_END --> <a id="transformers.JsonPipelineDataFormat.save" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.JsonPipelineDataFormat.save"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L592" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60">: dict</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.JsonPipelineDataFormat.save.data" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.JsonPipelineDataFormat.save.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>dict</code>) &#x2014; The data to store.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Save the provided data object in a json file.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PipedPipelineDataFormat"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 
7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PipedPipelineDataFormat</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PipedPipelineDataFormat" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PipedPipelineDataFormat"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L603" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">output_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_path<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">column<span class="opacity-60">: typing.Optional[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">overwrite<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipedPipelineDataFormat.output_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipedPipelineDataFormat.output_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>output_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to save the outgoing data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipedPipelineDataFormat.input_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipedPipelineDataFormat.input_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_path</strong> (<code>str</code>, <em>optional</em>) &#x2014; Where to look for the input data.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipedPipelineDataFormat.column" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PipedPipelineDataFormat.column"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>column</strong> (<code>str</code>, <em>optional</em>) &#x2014; The column to read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PipedPipelineDataFormat.overwrite" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipedPipelineDataFormat.overwrite"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 
40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>overwrite</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to overwrite the <code>output_path</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Read data from piped input to the python process. For multi columns data, columns should separated by </p> <p>If columns are provided, then the output will be a dictionary with {column_x: value_x}</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PipedPipelineDataFormat.save"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 
12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save</span></h4><!-- HTML_TAG_END --> <a id="transformers.PipedPipelineDataFormat.save" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PipedPipelineDataFormat.save"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L633" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">data<span class="opacity-60">: dict</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex 
space-x-1.5 items-start"><a id="transformers.PipedPipelineDataFormat.save.data" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PipedPipelineDataFormat.save.data"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>data</strong> (<code>dict</code>) &#x2014; The data to store.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Print the data.</p></div></div> <h2 class="relative group"><a id="transformers.pipelines.PipelineException" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipelines.PipelineException"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.pipelines.PipelineException"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.pipelines.</span><span class="font-semibold">PipelineException</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.pipelines.PipelineException" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.pipelines.PipelineException"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/pipelines/base.py#L377" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">task<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">model<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">reason<span class="opacity-60">: str</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.pipelines.PipelineException.task" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipelines.PipelineException.task"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>task</strong> (<code>str</code>) &#x2014; The task of the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.pipelines.PipelineException.model" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipelines.PipelineException.model"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model</strong> (<code>str</code>) &#x2014; The model used by the pipeline.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.pipelines.PipelineException.reason" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.pipelines.PipelineException.reason"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>reason</strong> (<code>str</code>) &#x2014; The error message to display.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Raised by a <a href="/docs/transformers/pr_16143/en/main_classes/pipelines#transformers.Pipeline">Pipeline</a> when handling <strong>call</strong>.</p></div> <script type="module" 
data-hydrate="wp8ey1"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="wp8ey1"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/internal/pipelines_utils.mdx-885320a3.js") ], params: {} } }); </script>
453
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/internal/generation_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;utilities-for-generation&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;generate-outputs&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.generation_utils.GreedySearchDecoderOnlyOutput&quot;,&quot;title&quot;:&quot;GreedySearchOutput&quot;},{&quot;local&quot;:&quot;transformers.generation_utils.SampleDecoderOnlyOutput&quot;,&quot;title&quot;:&quot;SampleOutput&quot;},{&quot;local&quot;:&quot;transformers.generation_utils.BeamSearchDecoderOnlyOutput&quot;,&quot;title&quot;:&quot;BeamSearchOutput&quot;},{&quot;local&quot;:&quot;transformers.generation_utils.BeamSampleDecoderOnlyOutput&quot;,&quot;title&quot;:&quot;BeamSampleOutput&quot;}],&quot;title&quot;:&quot;Generate Outputs&quot;},{&quot;local&quot;:&quot;transformers.LogitsProcessor&quot;,&quot;title&quot;:&quot;LogitsProcessor&quot;},{&quot;local&quot;:&quot;transformers.StoppingCriteria&quot;,&quot;title&quot;:&quot;StoppingCriteria&quot;},{&quot;local&quot;:&quot;transformers.Constraint&quot;,&quot;title&quot;:&quot;Constraints&quot;},{&quot;local&quot;:&quot;transformers.BeamScorer&quot;,&quot;title&quot;:&quot;BeamSearch&quot;},{&quot;local&quot;:&quot;transformers.top_k_top_p_filtering&quot;,&quot;title&quot;:&quot;Utilities&quot;}],&quot;title&quot;:&quot;Utilities for Generation&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" 
href="/docs/transformers/pr_16143/en/_app/pages/internal/generation_utils.mdx-8b17d2b2.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="utilities-for-generation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#utilities-for-generation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities for Generation </span></h1> <p>This page lists all the utility functions used by <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a>, <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.greedy_search">greedy_search()</a>, <a 
href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.sample">sample()</a>, <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search">beam_search()</a>, <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample">beam_sample()</a>, <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search">group_beam_search()</a>, and <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.constrained_beam_search">constrained_beam_search()</a>.</p> <p>Most of those are only useful if you are studying the code of the generate methods in the library.</p> <h2 class="relative group"><a id="generate-outputs" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#generate-outputs"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Generate Outputs </span></h2> <p>The output of <a 
href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a> is an instance of a subclass of <a href="/docs/transformers/pr_16143/en/main_classes/output#transformers.file_utils.ModelOutput">ModelOutput</a>. This output is a data structure containing all the information returned by <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate">generate()</a>, but that can also be used as tuple or dictionary.</p> <p>Here’s an example:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) model 
= GPT2LMHeadModel.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) inputs = tokenizer(<span class="hljs-string">&quot;Hello, my dog is cute and &quot;</span>, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>) generation_output = model.generate(**inputs, return_dict_in_generate=<span class="hljs-literal">True</span>, output_scores=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>The <code>generation_output</code> object is a <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.generation_utils.GreedySearchDecoderOnlyOutput">GreedySearchDecoderOnlyOutput</a>, as we can see in the documentation of that class below, it means it has the following attributes:</p> <ul><li><code>sequences</code>: the generated sequences of tokens</li> <li><code>scores</code> (optional): the prediction scores of the language modelling head, for each generation step</li> <li><code>hidden_states</code> (optional): the hidden states of the model, for each generation step</li> <li><code>attentions</code> (optional): the attention weights of the model, for each generation step</li></ul> <p>Here we have the <code>scores</code> since we passed along <code>output_scores=True</code>, but we don’t have <code>hidden_states</code> and <code>attentions</code> because we didn’t pass <code>output_hidden_states=True</code> or <code>output_attentions=True</code>.</p> <p>You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get <code>None</code>. Here for instance <code>generation_output.scores</code> are all the generated prediction scores of the language modeling head, and <code>generation_output.attentions</code> is <code>None</code>.</p> <p>When using our <code>generation_output</code> object as a tuple, it only keeps the attributes that don’t have <code>None</code> values. 
Here, for instance, it has two elements, <code>loss</code> then <code>logits</code>, so</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->generation_output[:<span class="hljs-number">2</span>]<!-- HTML_TAG_END --></pre></div> <p>will return the tuple <code>(generation_output.sequences, generation_output.scores)</code> for instance.</p> <p>When using our <code>generation_output</code> object as a dictionary, it only keeps the attributes that don’t have <code>None</code> values. 
Here, for instance, it has two keys that are <code>sequences</code> and <code>scores</code>.</p> <p>We document here all output types.</p> <h3 class="relative group"><a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>GreedySearchOutput </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GreedySearchDecoderOnlyOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path 
class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">GreedySearchDecoderOnlyOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L62" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> 
<span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 
0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. <code>(max_length-input_ids.shape[-1],)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size, config.vocab_size)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when 
<code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchDecoderOnlyOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchDecoderOnlyOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, generated_length, 
hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of decoder-only generation models using greedy search.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.GreedySearchEncoderDecoderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">GreedySearchEncoderDecoderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L90" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; 
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. <code>(max_length-1,)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size, config.vocab_size)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 
my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.GreedySearchEncoderDecoderOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_flax_utils.FlaxGreedySearchOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 
2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_flax_utils.</span><span class="font-semibold">FlaxGreedySearchOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_flax_utils.FlaxGreedySearchOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_flax_utils.FlaxGreedySearchOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_utils.py#L45" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> 
<span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: ndarray = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxGreedySearchOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxGreedySearchOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, max_length)</code>) &#x2014; The generated sequences.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Flax Base class for outputs of decoder-only generation models using greedy search.</p> <div class="docstring"><div><span 
class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h3 class="relative group"><a id="transformers.generation_utils.SampleDecoderOnlyOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleDecoderOnlyOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>SampleOutput </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.SampleDecoderOnlyOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">SampleDecoderOnlyOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.SampleDecoderOnlyOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.SampleDecoderOnlyOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L132" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> 
</p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleDecoderOnlyOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleDecoderOnlyOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleDecoderOnlyOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleDecoderOnlyOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. 
<code>(max_length-input_ids.shape[-1],)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_return_sequences, config.vocab_size)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleDecoderOnlyOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleDecoderOnlyOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 
items-start"><a id="transformers.generation_utils.SampleDecoderOnlyOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleDecoderOnlyOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(num_return_sequences*batch_size, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of decoder-only generation models using sampling.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.SampleEncoderDecoderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span 
class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">SampleEncoderDecoderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.SampleEncoderDecoderOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.SampleEncoderDecoderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 
56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L161" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. 
<code>(max_length-1,)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_return_sequences, config.vocab_size)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.generation_utils.SampleEncoderDecoderOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_return_sequences, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.SampleEncoderDecoderOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.SampleEncoderDecoderOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.SampleEncoderDecoderOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_flax_utils.FlaxSampleOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 
3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_flax_utils.</span><span class="font-semibold">FlaxSampleOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_flax_utils.FlaxSampleOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_flax_utils.FlaxSampleOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_utils.py#L59" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: ndarray = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_flax_utils.FlaxSampleOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_flax_utils.FlaxSampleOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, max_length)</code>) &#x2014; The generated sequences.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Flax Base class for outputs of decoder-only generation models using sampling.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 
bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="None"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>replace</span></h4><!-- HTML_TAG_END --> <a id="None" class="header-link invisible with-hover:group-hover:visible pr-2" href="#None"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/flax/struct.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**updates<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>“Returns a new object replacing the specified fields with new values.</p></div></div> <h3 class="relative group"><a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BeamSearchOutput </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.BeamSearchDecoderOnlyOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">BeamSearchDecoderOnlyOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L204" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences_scores<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_indices<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.LongTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape 
<code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.sequences_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. 
<code>(max_length-input_ids.shape[-1],)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_beams*num_return_sequences, config.vocab_size)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.beam_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.beam_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. 
<code>(batch_size*num_return_sequences)</code>-shaped tuple of <code>(max_length-input_ids.shape[-1],)</code>-shaped tuples of scalar <code>torch.LongTensor</code> tensors.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.generation_utils.BeamSearchDecoderOnlyOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchDecoderOnlyOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of decoder-only generation models using beam search.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.BeamSearchEncoderDecoderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span 
class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">BeamSearchEncoderDecoderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 
40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L239" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences_scores<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_indices<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.LongTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span 
class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.sequences_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size*num_return_sequences)</code>, 
<em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. 
<code>(max_length-1,)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_beams, config.vocab_size)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.beam_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.beam_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. 
<code>(batch_size*num_return_sequences)</code>-shaped tuple of <code>(max_length-1,)</code>-shaped tuples of scalar <code>torch.LongTensor</code> tensors.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014;<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSearchEncoderDecoderOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when 
<code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)</p></div> <h3 class="relative group"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BeamSampleOutput </span></h3> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.generation_utils.BeamSampleDecoderOnlyOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">BeamSampleDecoderOnlyOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L290" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences_scores<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_indices<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.LongTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_return_sequences, sequence_length)</code>) &#x2014; The generated sequences. 
The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.sequences_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_return_sequence)</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. 
<code>(max_length-input_ids.shape[-1],)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_beams*num_return_sequences, config.vocab_size)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.beam_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.beam_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. 
<code>(batch_size*num_return_sequences)</code>-shaped tuple of <code>(max_length-input_ids.shape[-1],)</code>-shaped tuples of scalar <code>torch.LongTensor</code> tensors.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.generation_utils.BeamSampleDecoderOnlyOutput.hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleDecoderOnlyOutput.hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of decoder-only generation models using beam sample.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.generation_utils.BeamSampleEncoderDecoderOutput"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all 
md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.generation_utils.</span><span class="font-semibold">BeamSampleEncoderDecoderOutput</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L325" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: LongTensor = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences_scores<span class="opacity-60">: typing.Optional[torch.FloatTensor] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_indices<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.LongTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[torch.FloatTensor]] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cross_attentions<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">decoder_hidden_states<span class="opacity-60">: typing.Optional[typing.Tuple[typing.Tuple[torch.FloatTensor]]] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size*num_beams, sequence_length)</code>) &#x2014; The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.sequences_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_return_sequence)</code>, <em>optional</em>, returned 
when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Final beam scores of the generated <code>sequences</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tuple(torch.FloatTensor)</code> <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. 
<code>(max_length-1,)</code>-shaped tuple of <code>torch.FloatTensor</code> with each tensor of shape <code>(batch_size*num_beams, config.vocab_size)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.beam_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.beam_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>beam_indices</strong> (<code>tuple(tuple(torch.LongTensor))</code>, <em>optional</em>, returned when <code>output_scores=True</code> is passed or when <code>config.output_scores=True</code>) &#x2014; Beam indices of generated token id at each generation step. 
<code>(batch_size*num_return_sequences)</code>-shaped tuple of <code>(max_length-1,)</code>-shaped tuples of scalar <code>torch.LongTensor</code> tensors.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_attentions</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for each layer of the decoder) of shape <code>(batch_size, num_heads, sequence_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.encoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoder_hidden_states</strong> (<code>tuple(torch.FloatTensor)</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple of <code>torch.FloatTensor</code> (one for the output of the embeddings + one for the output of each layer) of shape <code>(batch_size*num_beams, sequence_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.cross_attentions" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.cross_attentions"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cross_attentions</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_attentions=True</code> is passed or <code>config.output_attentions=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size, num_heads, generated_length, sequence_length)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_hidden_states" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.generation_utils.BeamSampleEncoderDecoderOutput.decoder_hidden_states"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>decoder_hidden_states</strong> (<code>tuple(tuple(torch.FloatTensor))</code>, <em>optional</em>, returned when <code>output_hidden_states=True</code> is passed or when <code>config.output_hidden_states=True</code>) &#x2014; Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of <code>torch.FloatTensor</code> of shape <code>(batch_size*num_beams, generated_length, hidden_size)</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)</p></div> <h2 class="relative group"><a id="transformers.LogitsProcessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsProcessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LogitsProcessor </span></h2> <p>A <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> can be used to modify the prediction scores of a language model head for generation.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsProcessor"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L51" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit processors that can be applied during generation.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 
15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.LogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L54" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsProcessor.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsProcessor.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsProcessor.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsProcessor.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LogitsProcessor.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p></div></div> <p>Torch method for processing logits.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsProcessorList"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path 
class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LogitsProcessorList</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LogitsProcessorList" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsProcessorList"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L73" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">iterable<span class="opacity-60"> = ()</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This class can be used to create a list of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> or <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> to subsequently process a <code>scores</code> input tensor. This class inherits from list and adds a specific <em><strong>call</strong></em> method to apply each <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> or <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> to the inputs.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsProcessorList.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 
11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.LogitsProcessorList.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsProcessorList.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L80" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsProcessorList.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsProcessorList.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsProcessorList.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsProcessorList.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LogitsProcessorList.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p></div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 
.27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">LogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.LogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L62" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r 
rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.LogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.LogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.LogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L65" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsWarper.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.LogitsWarper.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.LogitsWarper.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.LogitsWarper.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. 
These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.LogitsWarper.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p></div></div> <p>Torch method for warping logits.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MinLengthLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MinLengthLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MinLengthLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MinLengthLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L96" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex 
items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MinLengthLogitsProcessor.min_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MinLengthLogitsProcessor.min_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to <code>-float(&quot;Inf&quot;)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MinLengthLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MinLengthLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> enforcing a min-length by setting EOS probability to 0.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MinLengthLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" 
d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.MinLengthLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MinLengthLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L117" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TemperatureLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TemperatureLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TemperatureLogitsWarper" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.TemperatureLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L124" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60">: float</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TemperatureLogitsWarper.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.TemperatureLogitsWarper.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> (<code>float</code>) &#x2014; The value used to module the logits distribution.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> for temperature (exponential scaling output probability distribution).</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TemperatureLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 
27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TemperatureLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TemperatureLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L139" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 
hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RepetitionPenaltyLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span 
class="font-semibold">RepetitionPenaltyLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.RepetitionPenaltyLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RepetitionPenaltyLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L144" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">penalty<span class="opacity-60">: float</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.RepetitionPenaltyLogitsProcessor.repetition_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.RepetitionPenaltyLogitsProcessor.repetition_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repetition_penalty</strong> (<code>float</code>) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. 
See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> enforcing an exponential penalty on repeated sequences.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.RepetitionPenaltyLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.RepetitionPenaltyLogitsProcessor.__call__" 
class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.RepetitionPenaltyLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L160" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TopPLogitsWarper"><!-- HTML_TAG_START --><h3 
class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TopPLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TopPLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TopPLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L170" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TopPLogitsWarper.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopPLogitsWarper.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TopPLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopPLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to 
<code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TopPLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopPLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> that performs top-p, i.e. 
restricting to top tokens summing to prob_cut_off &lt;= prob_cut_off.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TopPLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TopPLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TopPLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L193" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TopKLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TopKLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TopKLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TopKLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L212" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TopKLogitsWarper.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopKLogitsWarper.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TopKLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopKLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TopKLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TopKLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsWarper">LogitsWarper</a> that performs top-k, i.e. 
restricting to the k highest probability elements.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TopKLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TopKLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TopKLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L233" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.NoRepeatNGramLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">NoRepeatNGramLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.NoRepeatNGramLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.NoRepeatNGramLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L309" target="_blank"><span>&lt;</span> 
<span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ngram_size<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.NoRepeatNGramLogitsProcessor.ngram_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.NoRepeatNGramLogitsProcessor.ngram_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ngram_size</strong> (<code>int</code>) &#x2014; All ngrams of size <code>ngram_size</code> can only occur once.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a 
href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that enforces no repetition of n-grams. See <a href="https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345" rel="nofollow">Fairseq</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.NoRepeatNGramLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.NoRepeatNGramLogitsProcessor.__call__" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.NoRepeatNGramLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L324" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.NoBadWordsLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 
break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">NoBadWordsLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.NoBadWordsLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.NoBadWordsLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L376" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bad_words_ids<span class="opacity-60">: typing.List[typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.NoBadWordsLogitsProcessor.bad_words_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.NoBadWordsLogitsProcessor.bad_words_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bad_words_ids</strong> (<code>List[List[int]]</code>) &#x2014; List of list of token ids that are not allowed to be generated. In order to get the token ids of the words that should not appear in the generated text, use <code>tokenizer(bad_words, add_prefix_space=True, add_special_tokens=False).input_ids</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.NoBadWordsLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.NoBadWordsLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> 
token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that enforces that specified sequences will never be sampled.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.NoBadWordsLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.NoBadWordsLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.NoBadWordsLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L418" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PrefixConstrainedLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r 
px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PrefixConstrainedLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PrefixConstrainedLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PrefixConstrainedLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L504" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prefix_allowed_tokens_fn<span class="opacity-60">: typing.Callable[[int, torch.Tensor], typing.List[int]]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that enforces constrained generation and is useful for prefix-conditioned constrained generation. 
See <a href="https://arxiv.org/abs/2010.00904" rel="nofollow">Autoregressive Entity Retrieval</a> for more information.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PrefixConstrainedLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.PrefixConstrainedLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PrefixConstrainedLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L521" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HammingDiversityLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 
text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">HammingDiversityLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.HammingDiversityLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HammingDiversityLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L530" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">diversity_penalty<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beam_groups<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.HammingDiversityLogitsProcessor.diversity_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HammingDiversityLogitsProcessor.diversity_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>diversity_penalty</strong> (<code>float</code>) &#x2014; This value is subtracted from a beam&#x2019;s score if it generates a token same as any beam from other group at a particular time. Note that <code>diversity_penalty</code> is only effective if <code>group beam search</code> is enabled.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.HammingDiversityLogitsProcessor.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HammingDiversityLogitsProcessor.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>) &#x2014; Number of beams used for group beam 
search. See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.HammingDiversityLogitsProcessor.num_beam_groups" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.HammingDiversityLogitsProcessor.num_beam_groups"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beam_groups</strong> (<code>int</code>) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that enforces diverse beam search. 
Note that this logits processor is only effective for <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.group_beam_search">PreTrainedModel.group_beam_search()</a>. See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models</a> for more details.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.HammingDiversityLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.HammingDiversityLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.HammingDiversityLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L561" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">current_tokens<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">beam_group_idx<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ForcedBOSTokenLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ForcedBOSTokenLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ForcedBOSTokenLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ForcedBOSTokenLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L590" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ForcedBOSTokenLogitsProcessor.bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ForcedBOSTokenLogitsProcessor.bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the first generated token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that enforces the specified token as the first generated token.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ForcedBOSTokenLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 
11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ForcedBOSTokenLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ForcedBOSTokenLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L602" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ForcedEOSTokenLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ForcedEOSTokenLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ForcedEOSTokenLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ForcedEOSTokenLogitsProcessor"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L611" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ForcedEOSTokenLogitsProcessor.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ForcedEOSTokenLogitsProcessor.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ForcedEOSTokenLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ForcedEOSTokenLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that enforces the specified token as the last generated token when <code>max_length</code> is reached.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ForcedEOSTokenLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 
7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.ForcedEOSTokenLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ForcedEOSTokenLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L626" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.InfNanRemoveLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">InfNanRemoveLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.InfNanRemoveLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.InfNanRemoveLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L635" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.LogitsProcessor">LogitsProcessor</a> that removes all <code>nan</code> and <code>inf</code> values to avoid the generation method to fail. Note that using the logits processor should only be used if necessary since it can slow down the generation method. 
<code>max_length</code> is reached.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.InfNanRemoveLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.InfNanRemoveLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.InfNanRemoveLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_logits_process.py#L642" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L50" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit processors that can be applied during generation.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.TFLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L53" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details 
"> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsProcessor.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsProcessor.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsProcessor.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsProcessor.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. 
These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFLogitsProcessor.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p></div></div> <p>TF method for processing logits.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsProcessorList"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFLogitsProcessorList</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFLogitsProcessorList" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLogitsProcessorList"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L72" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">iterable<span class="opacity-60"> = ()</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This class can be used to create a list of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> to subsequently process a 
<code>scores</code> input tensor. This class inherits from list and adds a specific <em><strong>call</strong></em> method to apply each <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> to the inputs.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsProcessorList.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFLogitsProcessorList.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.TFLogitsProcessorList.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L79" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Tensor</code> of shape <code>(batch_size, 
config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsProcessorList.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsProcessorList.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsProcessorList.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsProcessorList.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. 
These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFLogitsProcessorList.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p></div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span 
class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L61" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 
dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 
40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L64" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsWarper.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsWarper.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFLogitsWarper.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFLogitsWarper.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 
1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.TFLogitsWarper.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>tf.Tensor</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p></div></div> <p>TF method for warping logits.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTemperatureLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFTemperatureLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFTemperatureLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFTemperatureLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L95" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60">: float</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTemperatureLogitsWarper.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTemperatureLogitsWarper.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> 
(<code>float</code>) &#x2014; The value used to module the logits distribution.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsWarper">TFLogitsWarper</a> for temperature (exponential scaling output probability distribution).</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTemperatureLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFTemperatureLogitsWarper.__call__" class="header-link invisible 
with-hover:group-hover:visible pr-2" href="#transformers.TFTemperatureLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L110" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTopPLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg 
bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFTopPLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFTopPLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFTopPLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L144" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopPLogitsWarper.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopPLogitsWarper.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopPLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopPLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values 
will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopPLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopPLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsWarper">TFLogitsWarper</a> that performs top-p, i.e. 
restricting to top tokens summing to &lt;= prob_cut_off.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTopPLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFTopPLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFTopPLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 
256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L166" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTopKLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFTopKLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFTopKLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFTopKLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L115" target="_blank"><span>&lt;</span> <span class="hidden 
md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopKLogitsWarper.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopKLogitsWarper.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopKLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopKLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFTopKLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFTopKLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsWarper">TFLogitsWarper</a> that performs top-k, i.e. 
restricting to the k highest probability elements.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFTopKLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFTopKLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFTopKLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L136" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMinLengthLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFMinLengthLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFMinLengthLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMinLengthLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L198" target="_blank"><span>&lt;</span> 
<span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMinLengthLogitsProcessor.min_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMinLengthLogitsProcessor.min_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to <code>-float(&quot;Inf&quot;)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFMinLengthLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFMinLengthLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> enforcing a min-length by setting EOS probability to 0.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFMinLengthLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span 
class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFMinLengthLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFMinLengthLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L219" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFNoBadWordsLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 
3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFNoBadWordsLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFNoBadWordsLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFNoBadWordsLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L271" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">bad_words_ids<span class="opacity-60">: typing.List[typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFNoBadWordsLogitsProcessor.bad_words_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFNoBadWordsLogitsProcessor.bad_words_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bad_words_ids</strong> (<code>List[List[int]]</code>) &#x2014; List of list of token ids that are not allowed to be generated. 
In order to get the tokens of the words that should not appear in the generated text, use <code>tokenizer(bad_word, add_prefix_space=True).input_ids</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFNoBadWordsLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFNoBadWordsLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> that enforces that specified sequences will never be sampled.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFNoBadWordsLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 
rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFNoBadWordsLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFNoBadWordsLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L334" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFNoRepeatNGramLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" 
fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFNoRepeatNGramLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFNoRepeatNGramLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFNoRepeatNGramLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L354" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">ngram_size<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFNoRepeatNGramLogitsProcessor.ngram_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFNoRepeatNGramLogitsProcessor.ngram_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ngram_size</strong> (<code>int</code>) &#x2014; All ngrams of size <code>ngram_size</code> can only occur once.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> that enforces no repetition of n-grams. 
See <a href="https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345" rel="nofollow">Fairseq</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFNoRepeatNGramLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFNoRepeatNGramLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFNoRepeatNGramLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L392" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRepetitionPenaltyLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 
text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TFRepetitionPenaltyLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TFRepetitionPenaltyLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRepetitionPenaltyLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm 
flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L233" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">penalty<span class="opacity-60">: float</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TFRepetitionPenaltyLogitsProcessor.repetition_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TFRepetitionPenaltyLogitsProcessor.repetition_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>repetition_penalty</strong> (<code>float</code>) &#x2014; The parameter for repetition penalty. 1.0 means no penalty. See <a href="https://arxiv.org/pdf/1909.05858.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.TFLogitsProcessor">TFLogitsProcessor</a> enforcing an exponential penalty on repeated sequences.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TFRepetitionPenaltyLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.TFRepetitionPenaltyLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TFRepetitionPenaltyLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_logits_process.py#L262" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: Tensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: Tensor</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> 
</div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L50" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit processors that can be applied during generation.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 
11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L53" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> 
</span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsProcessor.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsProcessor.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input 
sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsProcessor.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsProcessor.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. 
These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxLogitsProcessor.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p></div></div> <p>Flax method for processing logits.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsProcessorList"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxLogitsProcessorList</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsProcessorList" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsProcessorList"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L72" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">iterable<span class="opacity-60"> = ()</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>This class can be used to create a list of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsProcessor">FlaxLogitsProcessor</a> or <a 
href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsWarper">FlaxLogitsWarper</a> to subsequently process a <code>scores</code> input tensor. This class inherits from list and adds a specific <em><strong>call</strong></em> method to apply each <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsProcessor">FlaxLogitsProcessor</a> or <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsWarper">FlaxLogitsWarper</a> to the inputs.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsProcessorList.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsProcessorList.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsProcessorList.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L79" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsProcessorList.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsProcessorList.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of 
shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsProcessorList.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsProcessorList.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; 
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxLogitsProcessorList.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p></div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 
1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L61" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all 
bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L64" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsWarper.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsWarper.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxLogitsWarper.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxLogitsWarper.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 
0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs &#x2014; Additional logits processor specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.FlaxLogitsWarper.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>jnp.ndarray</code> of shape <code>(batch_size, config.vocab_size)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The processed prediction scores.</p> <!-- HTML_TAG_END --></p></div></div> <p>Flax method for warping logits.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTemperatureLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg 
class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxTemperatureLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxTemperatureLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTemperatureLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" 
href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L95" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">temperature<span class="opacity-60">: float</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTemperatureLogitsWarper.temperature" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTemperatureLogitsWarper.temperature"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>temperature</strong> 
(<code>float</code>) &#x2014; The value used to module the logits distribution.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsWarper">FlaxLogitsWarper</a> for temperature (exponential scaling output probability distribution).</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTemperatureLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxTemperatureLogitsWarper.__call__" class="header-link 
invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTemperatureLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L110" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex 
space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTopPLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxTopPLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxTopPLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTopPLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L115" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: float</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTopPLogitsWarper.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopPLogitsWarper.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>) &#x2014; If set to &lt; 1, only the most probable tokens with probabilities that add up to <code>top_p</code> or higher are kept for generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTopPLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopPLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTopPLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopPLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsWarper">FlaxLogitsWarper</a> that performs top-p, i.e. 
restricting to top tokens summing to prob_cut_off &lt;= prob_cut_off.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTopPLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxTopPLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTopPLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L137" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTopKLogitsWarper"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white 
dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxTopKLogitsWarper</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxTopKLogitsWarper" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTopKLogitsWarper"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 
!no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L156" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTopKLogitsWarper.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopKLogitsWarper.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 
0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>) &#x2014; The number of highest probability vocabulary tokens to keep for top-k-filtering.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxTopKLogitsWarper.filter_value" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopKLogitsWarper.filter_value"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filter_value</strong> (<code>float</code>, <em>optional</em>, defaults to <code>-float(&quot;Inf&quot;)</code>) &#x2014; All filtered values will be set to this float value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group 
flex space-x-1.5 items-start"><a id="transformers.FlaxTopKLogitsWarper.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxTopKLogitsWarper.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimum number of tokens that cannot be filtered.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsWarper">FlaxLogitsWarper</a> that performs top-k, i.e. 
restricting to the k highest probability elements.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxTopKLogitsWarper.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxTopKLogitsWarper.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxTopKLogitsWarper.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 
256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L177" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxForcedBOSTokenLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white 
dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxForcedBOSTokenLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxForcedBOSTokenLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxForcedBOSTokenLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> 
<a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L192" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">bos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxForcedBOSTokenLogitsProcessor.bos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxForcedBOSTokenLogitsProcessor.bos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the first generated token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsProcessor">FlaxLogitsProcessor</a> that enforces the specified token as the first generated token.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxForcedBOSTokenLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxForcedBOSTokenLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxForcedBOSTokenLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L204" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> 
</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxForcedEOSTokenLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxForcedEOSTokenLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxForcedEOSTokenLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxForcedEOSTokenLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L216" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxForcedEOSTokenLogitsProcessor.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxForcedEOSTokenLogitsProcessor.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxForcedEOSTokenLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxForcedEOSTokenLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 
0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the token to force as the last generated token when <code>max_length</code> is reached.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsProcessor">FlaxLogitsProcessor</a> that enforces the specified token as the last generated token when <code>max_length</code> is reached.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxForcedEOSTokenLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxForcedEOSTokenLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxForcedEOSTokenLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L231" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMinLengthLogitsProcessor"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">FlaxMinLengthLogitsProcessor</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.FlaxMinLengthLogitsProcessor" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMinLengthLogitsProcessor"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L243" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMinLengthLogitsProcessor.min_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMinLengthLogitsProcessor.min_length"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_length</strong> (<code>int</code>) &#x2014; The minimum length below which the score of <code>eos_token_id</code> is set to <code>-float(&quot;Inf&quot;)</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.FlaxMinLengthLogitsProcessor.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.FlaxMinLengthLogitsProcessor.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 
0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.FlaxLogitsProcessor">FlaxLogitsProcessor</a> enforcing a min-length by setting EOS probability to 0.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.FlaxMinLengthLogitsProcessor.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.FlaxMinLengthLogitsProcessor.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.FlaxMinLengthLogitsProcessor.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_flax_logits_process.py#L264" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: ndarray</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">cur_len<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.StoppingCriteria" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.StoppingCriteria"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>StoppingCriteria </span></h2> <p>A <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.StoppingCriteria">StoppingCriteria</a> can be used to change when to stop generation (other than EOS token).</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.StoppingCriteria"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">StoppingCriteria</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.StoppingCriteria" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.StoppingCriteria"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L33" 
target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all stopping criteria that can be applied during generation.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.StoppingCriteria.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a 
id="transformers.StoppingCriteria.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.StoppingCriteria.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L36" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center 
font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.StoppingCriteria.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.StoppingCriteria.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.StoppingCriteria.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.StoppingCriteria.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. 
kwargs &#x2014; Additional stopping criteria specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.StoppingCriteriaList"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">StoppingCriteriaList</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.StoppingCriteriaList" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.StoppingCriteriaList"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 
1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L110" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">iterable<span class="opacity-60"> = ()</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.StoppingCriteriaList.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" 
clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.StoppingCriteriaList.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.StoppingCriteriaList.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L111" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.StoppingCriteriaList.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.StoppingCriteriaList.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.StoppingCriteriaList.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.StoppingCriteriaList.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs &#x2014; Additional stopping criteria specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaxLengthCriteria"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MaxLengthCriteria</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MaxLengthCriteria" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaxLengthCriteria"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L41" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxLengthCriteria.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxLengthCriteria.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length that the output sequence can have in number of tokens.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This class can be used to stop generation whenever the full generated number of tokens exceeds <code>max_length</code>. 
Keep in mind for decoder-only type of transformers, this will include the initial prompted tokens.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaxLengthCriteria.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.MaxLengthCriteria.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaxLengthCriteria.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L54" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxLengthCriteria.__call__.input_ids" 
class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxLengthCriteria.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxLengthCriteria.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxLengthCriteria.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. 
kwargs &#x2014; Additional stopping criteria specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaxTimeCriteria"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">MaxTimeCriteria</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.MaxTimeCriteria" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaxTimeCriteria"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L88" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_time<span class="opacity-60">: float</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">initial_timestamp<span class="opacity-60">: typing.Optional[float] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxTimeCriteria.max_time" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxTimeCriteria.max_time"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_time</strong> (<code>float</code>) &#x2014; The maximum allowed time in seconds for the generation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxTimeCriteria.initial_time" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxTimeCriteria.initial_time"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>initial_time</strong> 
(<code>float</code>, <em>optional</em>, defaults to <code>time.time()</code>) &#x2014; The start of the generation allowed time.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the time will start being counted when you initialize this function. You can override this by passing an <code>initial_time</code>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.MaxTimeCriteria.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- 
HTML_TAG_END --> <a id="transformers.MaxTimeCriteria.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.MaxTimeCriteria.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_stopping_criteria.py#L105" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex 
items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxTimeCriteria.__call__.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxTimeCriteria.__call__.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.MaxTimeCriteria.__call__.scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.MaxTimeCriteria.__call__.scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, config.vocab_size)</code>) &#x2014; Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. 
kwargs &#x2014; Additional stopping criteria specific kwargs.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div></div></div> <h2 class="relative group"><a id="transformers.Constraint" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.Constraint"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Constraints </span></h2> <p>A <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.Constraint">Constraint</a> can be used to force the generation to include specific tokens or sequences in the output.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" 
height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">Constraint</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.Constraint" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L5" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p 
class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all constraints that can be applied during generation. It must define how the constraint can be satisfied.</p> <p>All classes that inherit Constraint must follow the requirement that</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->completed = <span class="hljs-literal">False</span> <span class="hljs-keyword">while</span> <span class="hljs-keyword">not</span> completed: _, completed = constraint.update(constraint.advance())<!-- HTML_TAG_END --></pre></div> <p>will always terminate (halt).</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.Constraint.advance"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>advance</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.advance" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.advance"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L48" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>token_ids(<code>torch.tensor</code>)</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Constraint.advance.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>token_ids(<code>torch.tensor</code>)</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Must be a tensor of a list of indexable tokens, not some integer.</p> <!-- HTML_TAG_END --></p></div></div> <p>When called, returns the token that would take this constraint one step closer to being fulfilled.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.copy"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 
font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>copy</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.copy" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.copy"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L113" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stateful<span class="opacity-60"> = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>constraint(<code>Constraint</code>)</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Constraint.copy.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>constraint(<code>Constraint</code>)</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The same constraint as the one being called from.</p> <!-- HTML_TAG_END --></p></div></div> <p>Creates a new instance of this constraint.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.does_advance"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>does_advance</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.does_advance" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.does_advance"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L60" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Reads in a token and returns whether it creates progress.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.remaining"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 
20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>remaining</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.remaining" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.remaining"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L104" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Returns the number of remaining steps of <code>advance()</code> in order to complete this constraint.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" 
id="transformers.Constraint.reset"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>reset</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.reset" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.reset"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L94" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of a constraint is abrupted by an unwanted token.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.test"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 
11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>test</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.test" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.test"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L24" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Tests whether this constraint has been properly defined.</p></div> <div 
class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.Constraint.update"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>update</span></h4><!-- HTML_TAG_END --> <a id="transformers.Constraint.update" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.Constraint.update"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L69" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_id<span class="opacity-60">: int</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>stepped(<code>bool</code>)</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.Constraint.update.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>stepped(<code>bool</code>)</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Whether this constraint has become one step closer to being fulfuilled. completed(<code>bool</code>): Whether this constraint has been completely fulfilled by this token being generated. 
reset (<code>bool</code>): Whether this constraint has reset its progress by this token being generated.</p> <!-- HTML_TAG_END --></p></div></div> <p>Reads in a token and returns booleans that indicate the progress made by it. This function will update the state of this object unlikes <code>does_advance(self, token_id: int)</code>.</p> <p>This isn’t to test whether a certain token will advance the progress; it’s to update its state as if it has been generated. This becomes important if token_id != desired token (refer to else statement in PhrasalConstraint)</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PhrasalConstraint"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">PhrasalConstraint</span></span></h3><!-- HTML_TAG_END --> <a 
id="transformers.PhrasalConstraint" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PhrasalConstraint"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L129" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids<span class="opacity-60">: typing.List[int]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PhrasalConstraint.token_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PhrasalConstraint.token_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids</strong> (<code>List[int]</code>) &#x2014; The id of the token that must be generated by the output.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.Constraint">Constraint</a> enforcing that an ordered sequence of tokens is included in the output.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.DisjunctiveConstraint"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path 
class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">DisjunctiveConstraint</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.DisjunctiveConstraint" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.DisjunctiveConstraint"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L260" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">nested_token_ids<span class="opacity-60">: typing.List[typing.List[int]]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DisjunctiveConstraint.nested_token_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DisjunctiveConstraint.nested_token_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>nested_token_ids</strong> (<code>List[List[int]]</code>) &#x2014; a list of words, where each word is a list of ids. 
This constraint<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.DisjunctiveConstraint.is" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.DisjunctiveConstraint.is"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is</strong> fulfilled by generating just one from the list of words. 
&#x2014;<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A special <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.Constraint">Constraint</a> that is fulfilled by fulfilling just one of several constraints.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConstraintListState"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConstraintListState</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConstraintListState" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConstraintListState"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L349" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">constraints<span class="opacity-60">: typing.List[transformers.generation_beam_constraints.Constraint]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstraintListState.constraints" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstraintListState.constraints"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>constraints</strong> (<code>List[Constraint]</code>) &#x2014; A list of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.Constraint">Constraint</a> objects that must be fulfilled by the beam scorer.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A class for beam scorers to track its progress through a list of constraints.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConstraintListState.advance"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 
12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>advance</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConstraintListState.advance" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConstraintListState.advance"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L381" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative 
docstring-details "> </div></div> <p>The list of tokens to generate such that we can make progress. By “list” we don’t mean the list of token that will fully fulfill a constraint.</p> <p>Given constraints <code>c_i = {t_ij | j == # of tokens}</code>, If we’re not in the middle of progressing through a specific constraint <code>c_i</code>, we return:</p> <p><code>[t_k1 for k in indices of unfulfilled constraints]</code></p> <p>If we are in the middle of a constraint, then we return: <code>[t_ij]</code>, where <code>i</code> is the index of the inprogress constraint, <code>j</code> is the next step for the constraint.</p> <p>Though we don’t care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint, that’s the only one we’ll return.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConstraintListState.reset"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 
21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>reset</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConstraintListState.reset" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConstraintListState.reset"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_constraints.py#L416" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids<span class="opacity-60">: typing.Optional[typing.List[int]]</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>token_ids: 
the tokens generated thus far to reset the state of the progress through constraints.</p></div></div> <h2 class="relative group"><a id="transformers.BeamScorer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BeamSearch </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BeamScorer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" 
fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BeamScorer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BeamScorer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BeamScorer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L88" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Abstract base class for all beam scorers that are used for <a 
href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_search">beam_search()</a> and <a href="/docs/transformers/pr_16143/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.beam_sample">beam_sample()</a>.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BeamScorer.process"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>process</span></h4><!-- HTML_TAG_END --> <a id="transformers.BeamScorer.process" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.BeamScorer.process"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L94" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_tokens<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_indices<span class="opacity-60">: LongTensor</span></span> 
</span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>UserDict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.process.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained 
using any class inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.process.next_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.next_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>next_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Current scores of the top <code>2 * num_beams</code> non-finished beam hypotheses.<!-- HTML_TAG_END --> </span></span> </li><li 
class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.process.next_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.next_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>next_tokens</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; <code>input_ids</code> of the tokens corresponding to the top <code>2 * num_beams</code> non-finished beam hypotheses.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.process.next_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.next_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>next_indices</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Beam indices indicating to which beam hypothesis the <code>next_tokens</code> correspond.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.process.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.process.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.process.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BeamScorer.process.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>UserDict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A dictionary composed of the fields as defined above:</p> <ul> <li><strong>next_beam_scores</strong> 
(<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) — Updated scores of all non-finished beams.</li> <li><strong>next_beam_tokens</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) — Next tokens to be added to the non-finished beam_hypotheses.</li> <li><strong>next_beam_indices</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) — Beam indices indicating to which beam the next tokens shall be added.</li> </ul> <!-- HTML_TAG_END --></p></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BeamScorer.finalize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 
13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>finalize</span></h4><!-- HTML_TAG_END --> <a id="transformers.BeamScorer.finalize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BeamScorer.finalize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L106" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">next_tokens<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_indices<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>torch.LongTensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.finalize.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.finalize.input_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 
28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using any class inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.finalize.final_beam_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.finalize.final_beam_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>final_beam_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) &#x2014; The final scores of all non-finished beams.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.finalize.final_beam_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.finalize.final_beam_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>final_beam_tokens</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) &#x2014; The last tokens to be added to the non-finished beam_hypotheses.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span 
class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.finalize.final_beam_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.finalize.final_beam_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>final_beam_indices</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) &#x2014; The beam indices indicating to which beam the <code>final_beam_tokens</code> shall be added.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.finalize.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.finalize.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamScorer.finalize.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamScorer.finalize.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the 
<em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.BeamScorer.finalize.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>torch.LongTensor</code> of shape <code>(batch_size * num_return_sequences, sequence_length)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The generated sequences. The second dimension (sequence_length) is either equal to <code>max_length</code> or shorter if all batches finished early due to the <code>eos_token_id</code>.</p> <!-- HTML_TAG_END --></p></div></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BeamSearchScorer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span 
class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">BeamSearchScorer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.BeamSearchScorer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BeamSearchScorer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L120" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_size<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span 
class="opacity-60">: device</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_penalty<span class="opacity-60">: typing.Optional[float] = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_early_stopping<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beam_hyps_to_keep<span class="opacity-60">: typing.Optional[int] = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beam_groups<span class="opacity-60">: typing.Optional[int] = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamSearchScorer.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamSearchScorer.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 
11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>) &#x2014; Batch Size of <code>input_ids</code> for which standard beam search decoding is run in parallel.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamSearchScorer.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamSearchScorer.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be 
generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamSearchScorer.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamSearchScorer.num_beams"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>) &#x2014; Number of beams for beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamSearchScorer.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamSearchScorer.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>torch.device</code>) &#x2014; Defines the device type (<em>e.g.</em>, <code>&quot;cpu&quot;</code> or <code>&quot;cuda&quot;</code>) on which this instance of <code>BeamSearchScorer</code> will be allocated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamSearchScorer.length_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamSearchScorer.length_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>length_penalty</strong> 
(<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length. 1.0 means no penalty. Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamSearchScorer.do_early_stopping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamSearchScorer.do_early_stopping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamSearchScorer.num_beam_hyps_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute 
with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamSearchScorer.num_beam_hyps_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beam_hyps_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of beam hypotheses that shall be returned upon calling <code>finalize</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.BeamSearchScorer.num_beam_groups" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.BeamSearchScorer.num_beam_groups"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beam_groups</strong> (<code>int</code>) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> implementing standard beam search decoding.</p> <p>Adapted in part from <a href="https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529" rel="nofollow">Facebook’s XLM beam search code</a>.</p> <p>Reference for the diverse beam search algorithm and implementation <a href="https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua" rel="nofollow">Ashwin Kalyan’s DBS implementation</a></p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BeamSearchScorer.process"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 
26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>process</span></h4><!-- HTML_TAG_END --> <a id="transformers.BeamSearchScorer.process" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BeamSearchScorer.process"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L206" target="_blank"><span>&lt;</span> 
<span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_tokens<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_indices<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.BeamSearchScorer.finalize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>finalize</span></h4><!-- HTML_TAG_END --> <a id="transformers.BeamSearchScorer.finalize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.BeamSearchScorer.finalize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L291" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_beam_scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_beam_tokens<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_beam_indices<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <div class="docstring"><div><span class="group flex 
space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConstrainedBeamSearchScorer"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">ConstrainedBeamSearchScorer</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.ConstrainedBeamSearchScorer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConstrainedBeamSearchScorer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L356" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">batch_size<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beams<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">constraints<span class="opacity-60">: typing.List[transformers.generation_beam_constraints.Constraint]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">device<span class="opacity-60">: device</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">length_penalty<span class="opacity-60">: typing.Optional[float] = 1.0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">do_early_stopping<span class="opacity-60">: typing.Optional[bool] = False</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beam_hyps_to_keep<span class="opacity-60">: typing.Optional[int] = 1</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_beam_groups<span class="opacity-60">: typing.Optional[int] = 1</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.batch_size" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.batch_size"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_size</strong> (<code>int</code>) &#x2014; Batch Size of <code>input_ids</code> for which standard beam search decoding is run in parallel.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>) &#x2014; The maximum length of the sequence to be generated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.num_beams" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.num_beams"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beams</strong> (<code>int</code>) &#x2014; Number of beams for beam search.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.constraints" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.constraints"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 
0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>constraints</strong> (<code>List[Constraint]</code>) &#x2014; A list of positive constraints represented as <code>Constraint</code> objects that must be fulfilled in the generation output. For more information, the documentation of <a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.Constraint">Constraint</a> should be read.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.device" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.device"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>device</strong> (<code>torch.device</code>) &#x2014; Defines the device type (<em>e.g.</em>, <code>&quot;cpu&quot;</code> or <code>&quot;cuda&quot;</code>) on which this instance of <code>BeamSearchScorer</code> will be allocated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex 
space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.length_penalty" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.length_penalty"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>length_penalty</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; Exponential penalty to the length. 1.0 means no penalty. 
Set to values &lt; 1.0 in order to encourage the model to generate shorter sequences, to a value &gt; 1.0 in order to encourage the model to produce longer sequences.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.do_early_stopping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.do_early_stopping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>do_early_stopping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether to stop the beam search when at least <code>num_beams</code> sentences are finished per batch or not.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.num_beam_hyps_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.ConstrainedBeamSearchScorer.num_beam_hyps_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beam_hyps_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; The number of beam hypotheses that shall be returned upon calling <code>finalize</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.num_beam_groups" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.num_beam_groups"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_beam_groups</strong> (<code>int</code>) &#x2014; Number of groups to divide <code>num_beams</code> into in order to ensure diversity among different groups of beams. See <a href="https://arxiv.org/pdf/1610.02424.pdf" rel="nofollow">this paper</a> for more details.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p><a href="/docs/transformers/pr_16143/en/internal/generation_utils#transformers.BeamScorer">BeamScorer</a> implementing constrained beam search decoding.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConstrainedBeamSearchScorer.process"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 
22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>process</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConstrainedBeamSearchScorer.process" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConstrainedBeamSearchScorer.process"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L450" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">next_scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_tokens<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">next_indices<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">scores_for_all_vocab<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>UserDict</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.process.input_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.process.input_ids"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>input_ids</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; Indices of input sequence tokens in the vocabulary.</p> <p>Indices can be obtained using any class inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a>. 
See <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.encode">PreTrainedTokenizer.encode()</a> and <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizer.<strong>call</strong>()</a> for details.</p> <p><a href="../glossary#input-ids">What are input IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.process.next_scores" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.process.next_scores"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>next_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Current scores of the top <code>2 * num_beams</code> non-finished beam hypotheses.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.ConstrainedBeamSearchScorer.process.next_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.process.next_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>next_tokens</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; <code>input_ids</code> of the tokens corresponding to the top <code>2 * num_beams</code> non-finished beam hypotheses.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.process.next_indices" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.process.next_indices"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>next_indices</strong> (<code>torch.LongTensor</code> of shape <code>(batch_size, 2 * num_beams)</code>) &#x2014; Beam indices indicating to which beam hypothesis the <code>next_tokens</code> correspond.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.process.scores_for_all_vocab" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.process.scores_for_all_vocab"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>scores_for_all_vocab</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams, sequence_length)</code>) &#x2014; The scores of all tokens in the vocabulary for each of the beam hypotheses.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.process.pad_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.process.pad_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>padding</em> token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.ConstrainedBeamSearchScorer.process.eos_token_id" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.ConstrainedBeamSearchScorer.process.eos_token_id"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token_id</strong> (<code>int</code>, <em>optional</em>) &#x2014; The id of the <em>end-of-sequence</em> token.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.ConstrainedBeamSearchScorer.process.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>UserDict</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A dictionary composed of the fields as defined above:</p> <ul> <li> <p><strong>next_beam_scores</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) — Updated scores of all non-finished beams.</p> </li> <li> <p><strong>next_beam_tokens</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size * num_beams)</code>) — Next tokens to be added to the non-finished beam_hypotheses.</p> </li> <li> <p><strong>next_beam_indices</strong> (<code>torch.FloatTensor</code> of shape <code>(batch_size 
* num_beams)</code>) — Beam indices indicating to which beam the next tokens shall be added.</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.ConstrainedBeamSearchScorer.finalize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>finalize</span></h4><!-- HTML_TAG_END --> <a id="transformers.ConstrainedBeamSearchScorer.finalize" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.ConstrainedBeamSearchScorer.finalize"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_beam_search.py#L725" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">input_ids<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_beam_scores<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_beam_tokens<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">final_beam_indices<span class="opacity-60">: LongTensor</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: int</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">eos_token_id<span class="opacity-60">: typing.Optional[int] = None</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div></div></div> <h2 class="relative group"><a id="transformers.top_k_top_p_filtering" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.top_k_top_p_filtering"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.top_k_top_p_filtering"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl 
py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>transformers.top_k_top_p_filtering</span></h4><!-- HTML_TAG_END --> <a id="transformers.top_k_top_p_filtering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.top_k_top_p_filtering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_utils.py#L3290" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60">: FloatTensor</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60">: float = 1.0</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60">: float = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60">: int = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.top_k_top_p_filtering.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.top_k_top_p_filtering.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If &gt; 0, only keep the top k tokens with highest probability (top-k filtering)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.top_k_top_p_filtering.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.top_k_top_p_filtering.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If &lt; 1.0, only keep the top tokens with cumulative probability &gt;= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (<a href="http://arxiv.org/abs/1904.09751" rel="nofollow">http://arxiv.org/abs/1904.09751</a>)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.top_k_top_p_filtering.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.top_k_top_p_filtering.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START 
--><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimumber of tokens we keep per batch example in the output.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Filter a distribution of logits using top-k and/or nucleus (top-p) filtering</p> <p>From: <a href="https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317" rel="nofollow">https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317</a></p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.tf_top_k_top_p_filtering"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>transformers.tf_top_k_top_p_filtering</span></h4><!-- HTML_TAG_END --> <a id="transformers.tf_top_k_top_p_filtering" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.tf_top_k_top_p_filtering"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/generation_tf_utils.py#L2270" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">logits<span class="opacity-60"></span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_k<span class="opacity-60"> = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">top_p<span class="opacity-60"> = 1.0</span></span> </span><span class="comma 
cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filter_value<span class="opacity-60"> = -inf</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">min_tokens_to_keep<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.tf_top_k_top_p_filtering.top_k" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.tf_top_k_top_p_filtering.top_k"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_k</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If &gt; 0, only keep the top k tokens with highest probability (top-k filtering)<!-- HTML_TAG_END --> </span></span> 
</li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.tf_top_k_top_p_filtering.top_p" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.tf_top_k_top_p_filtering.top_p"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>top_p</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) &#x2014; If &lt; 1.0, only keep the top tokens with cumulative probability &gt;= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. 
(<a href="http://arxiv.org/abs/1904.09751" rel="nofollow">http://arxiv.org/abs/1904.09751</a>)<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.tf_top_k_top_p_filtering.min_tokens_to_keep" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.tf_top_k_top_p_filtering.min_tokens_to_keep"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>min_tokens_to_keep</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014; Minimumber of tokens we keep per batch example in the output.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Filter a distribution of logits using top-k and/or nucleus (top-p) filtering</p> <p>From: <a href="https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317" rel="nofollow">https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317</a></p></div> <script type="module" data-hydrate="xs2w5z"> import { start } from "/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: 
document.querySelector('[data-hydrate="xs2w5z"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/internal/generation_utils.mdx-8b17d2b2.js") ], params: {} } }); </script>
454
0
hf_public_repos/doc-build-dev/transformers/pr_16143/en
hf_public_repos/doc-build-dev/transformers/pr_16143/en/internal/tokenization_utils.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;utilities-for-tokenizers&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;transformers.PreTrainedTokenizerBase&quot;,&quot;title&quot;:&quot;PreTrainedTokenizerBase&quot;},{&quot;local&quot;:&quot;transformers.SpecialTokensMixin&quot;,&quot;title&quot;:&quot;SpecialTokensMixin&quot;},{&quot;local&quot;:&quot;transformers.tokenization_utils_base.TruncationStrategy&quot;,&quot;title&quot;:&quot;Enums and namedtuples&quot;}],&quot;title&quot;:&quot;Utilities for Tokenizers&quot;}" data-svelte="svelte-1phssyn"> <link rel="stylesheet" href="/docs/transformers/pr_16143/en/_app/assets/pages/__layout.svelte-a5c8879b.css"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/vendor-4833417e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/paths-4b3c6e7e.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/pages/internal/tokenization_utils.mdx-eeb10b9c.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Tip-fffd6df1.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/Docstring-4f315ed9.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/IconCopyLink-4b81c553.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CodeBlock-6a3d1b46.js"> <link rel="modulepreload" href="/docs/transformers/pr_16143/en/_app/chunks/CopyButton-dacfbfaf.js"> <h1 class="relative group"><a id="utilities-for-tokenizers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#utilities-for-tokenizers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Utilities for Tokenizers </span></h1> <p>This page lists all the utility functions used by the tokenizers, mainly the class <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a> that implements the common methods between <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> and <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> and the mixin <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.SpecialTokensMixin">SpecialTokensMixin</a>.</p> <p>Most of those are only useful if you are studying the code of the tokenizers in the library.</p> <h2 class="relative group"><a id="transformers.PreTrainedTokenizerBase" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PreTrainedTokenizerBase </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span 
class="font-medium">transformers.</span><span class="font-semibold">PreTrainedTokenizerBase</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L1433" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.PreTrainedTokenizerBase.model_max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.model_max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a>, this will be set to the value stored for the associated model in <code>max_model_input_sizes</code> (see above). 
If no value is provided, will default to VERY_LARGE_INTEGER (<code>int(1e30)</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.padding_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.padding_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have padding applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncation_side" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.truncation_side"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation_side</strong> (<code>str</code>, <em>optional</em>) &#x2014; The side on which the model should have truncation applied. Should be selected between [&#x2018;right&#x2019;, &#x2018;left&#x2019;]. 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.model_input_names" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.model_input_names"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>model_input_names</strong> (<code>List[string]</code>, <em>optional</em>) &#x2014; The list of inputs accepted by the forward pass of the model (like <code>&quot;token_type_ids&quot;</code> or <code>&quot;attention_mask&quot;</code>). 
Default value is picked from the class attribute of the same name.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence. 
Will be associated to <code>self.bos_token</code> and <code>self.bos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence. 
Will be associated to <code>self.eos_token</code> and <code>self.eos_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token. 
Will be associated to <code>self.unk_token</code> and <code>self.unk_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance). 
Will be associated to <code>self.sep_token</code> and <code>self.sep_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. 
Will be associated to <code>self.pad_token</code> and <code>self.pad_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance). 
Will be associated to <code>self.cls_token</code> and <code>self.cls_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.mask_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). 
Will be associated to <code>self.mask_token</code> and <code>self.mask_token_id</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.additional_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.additional_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens. Add them here to ensure they won&#x2019;t be split by the tokenization process. 
Will be associated to <code>self.additional_special_tokens</code> and <code>self.additional_special_tokens_ids</code>.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Base class for <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> and <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>.</p> <p>Handles shared (mostly boiler plate) methods for those two classes.</p> <p>Class attributes (overridden by derived classes)</p> <ul><li><strong>vocab_files_names</strong> (<code>Dict[str, str]</code>) — A dictionary with, as keys, the <code>__init__</code> keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).</li> <li><strong>pretrained_vocab_files_map</strong> (<code>Dict[str, Dict[str, str]]</code>) — A dictionary of dictionaries, with the high-level keys being the <code>__init__</code> keyword name of each vocabulary file required by the model, the low-level being the <code>short-cut-names</code> of the pretrained models with, as associated values, the <code>url</code> to the associated pretrained vocabulary file.</li> <li><strong>max_model_input_sizes</strong> (<code>Dict[str, Optional[int]]</code>) — A dictionary with, as keys, the <code>short-cut-names</code> of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or <code>None</code> if the model has no maximum input size.</li> <li><strong>pretrained_init_configuration</strong> (<code>Dict[str, Dict[str, Any]]</code>) — A dictionary with, as keys, the <code>short-cut-names</code> of the pretrained models, and as associated values, a dictionary of specific arguments to pass to the <code>__init__</code> method of the tokenizer class for this pretrained model when loading the tokenizer with the <a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained">from_pretrained()</a> method.</li> <li><strong>model_input_names</strong> (<code>List[str]</code>) — A list of inputs expected in the forward pass of the model.</li> <li><strong>padding_side</strong> (<code>str</code>) — The default value for the side on which the model should have padding applied. Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li> <li><strong>truncation_side</strong> (<code>str</code>) — The default value for the side on which the model should have truncation applied. Should be <code>&#39;right&#39;</code> or <code>&#39;left&#39;</code>.</li></ul> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.__call__"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 
7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>__call__</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.__call__" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.__call__"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2379" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[typing.List[str]], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END 
--></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code>, <code>List[List[str]]</code>) &#x2014; The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set <code>is_split_into_words=True</code> (to lift the ambiguity with a batch of sequences).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerBase.__call__.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.__call__.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.__call__.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.__call__.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> — Number of tokens 
truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.as_target_tokenizer"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 
7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>as_target_tokenizer</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.as_target_tokenizer" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.as_target_tokenizer"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3404" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Temporarily sets the tokenizer for encoding the targets. 
Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.batch_decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>batch_decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.batch_decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.batch_decode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3250" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">sequences<span class="opacity-60">: typing.Union[typing.List[int], typing.List[typing.List[int]], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span 
class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.sequences" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.sequences"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sequences</strong> (<code>Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.clean_up_tokenization_spaces"><span><svg class="text-smd" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 
11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.batch_decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of decoded sentences.</p> <!-- HTML_TAG_END --></p></div></div> <p>Convert a list of lists of token ids into a list of strings by calling decode.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.batch_encode_plus"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 
11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>batch_encode_plus</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.batch_encode_plus" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2600" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">batch_text_or_text_pairs<span class="opacity-60">: typing.Union[typing.List[str], typing.List[typing.Tuple[str, str]], typing.List[typing.List[str]], typing.List[typing.Tuple[typing.List[str], typing.List[str]]], typing.List[typing.List[int]], typing.List[typing.Tuple[typing.List[int], typing.List[int]]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span 
class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.batch_text_or_text_pairs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.batch_text_or_text_pairs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>batch_text_or_text_pairs</strong> (<code>List[str]</code>, <code>List[Tuple[str, str]]</code>, <code>List[List[str]]</code>, <code>List[Tuple[List[str], List[str]]]</code>, and for not-fast tokenizers, also <code>List[List[int]]</code>, <code>List[Tuple[List[int], List[int]]]</code>) &#x2014; 
Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in <code>encode_plus</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_offsets_mapping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.batch_encode_plus.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.batch_encode_plus.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.batch_encode_plus.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> 
<p><strong>num_truncated_tokens</strong> — Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This method is deprecated, <code>__call__</code> should be used instead.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 
12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>build_inputs_with_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2884" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm 
!leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 
28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.build_inputs_with_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 
border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The model input with special tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens.</p> <p>This implementation does not add special tokens and this method should be overridden in a subclass.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.clean_up_tokenization"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" 
fill="currentColor"/></svg>clean_up_tokenization</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.clean_up_tokenization" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.clean_up_tokenization"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3359" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">out_string<span class="opacity-60">: str</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 
dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.clean_up_tokenization.out_string" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.clean_up_tokenization.out_string"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>out_string</strong> (<code>str</code>) &#x2014; The text to clean up.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.clean_up_tokenization.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The cleaned-up string.</p> <!-- HTML_TAG_END --></p></div></div> <p>Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.</p></div> <div 
class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.convert_tokens_to_string"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>convert_tokens_to_string</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.convert_tokens_to_string" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.convert_tokens_to_string"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3237" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tokens<span class="opacity-60">: typing.List[str]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.convert_tokens_to_string.tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerBase.convert_tokens_to_string.tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tokens</strong> (<code>List[str]</code>) &#x2014; The token to join in a string.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.convert_tokens_to_string.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The joined tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a sequence of tokens in a single string. 
The most simple way to do it is <code>&quot; &quot;.join(tokens)</code> but we often want to remove sub-word tokenization artifacts at the same time.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>create_token_type_ids_from_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" 
href="#transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2864" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters 
<span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; The first tokenized sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; The second tokenized sequence.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.create_token_type_ids_from_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The token type ids.</p> <!-- HTML_TAG_END --></p></div></div> <p>Create the token type IDs corresponding to the sequences passed. 
<a href="../glossary#token-type-ids">What are token type IDs?</a></p> <p>Should be overridden in a subclass if the model has a special way of building those.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.decode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>decode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.decode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.decode"><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3283" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids<span class="opacity-60">: typing.Union[int, typing.List[int], ForwardRef(&#39;np.ndarray&#39;), ForwardRef(&#39;torch.Tensor&#39;), ForwardRef(&#39;tf.Tensor&#39;)]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">skip_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">clean_up_tokenization_spaces<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.token_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.token_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids</strong> (<code>Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]</code>) &#x2014; List of tokenized input ids. 
Can be obtained using the <code>__call__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.skip_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.skip_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>skip_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to remove special tokens in the decoding.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.clean_up_tokenization_spaces"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>clean_up_tokenization_spaces</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to clean up the tokenization spaces.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.decode.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.decode.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 
0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific decode method.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.decode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The decoded sentence.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.</p> <p>Similar to doing <code>self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.encode"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 
12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.encode" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.encode"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2189" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>) &#x2014; The first sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerBase.encode.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.encode.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[int]</code>, <code>torch.Tensor</code>, <code>tf.Tensor</code> or <code>np.ndarray</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The tokenized ids of the text.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.</p> <p>Same as doing <code>self.convert_tokens_to_ids(self.tokenize(text))</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.encode_plus"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 
13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>encode_plus</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.encode_plus" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.encode_plus"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2504" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text_pair<span class="opacity-60">: typing.Union[str, typing.List[str], typing.List[int], NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">is_split_into_words<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code> (the latter only for not-fast tokenizers)) &#x2014; The first sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.text_pair" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.text_pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text_pair</strong> (<code>str</code>, <code>List[str]</code> or <code>List[int]</code>, <em>optional</em>) &#x2014; Optional second sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the <code>tokenize</code> method) or a list of integers (tokenized string ids using the <code>convert_tokens_to_ids</code> method).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_offsets_mapping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.encode_plus.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.encode_plus.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.encode_plus.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>num_truncated_tokens</strong> — Number of 
tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Tokenize and prepare for the model a sequence or a pair of sequences.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This method is deprecated, <code>__call__</code> should be used instead.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.from_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 
11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>from_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.from_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.from_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L1551" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">pretrained_model_name_or_path<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">*init_inputs<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.pretrained_model_name_or_path" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.pretrained_model_name_or_path"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- 
HTML_TAG_START --><strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; Can be either:</p> <ul> <li>A string, the <em>model id</em> of a predefined tokenizer hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like <code>bert-base-uncased</code>, or namespaced under a user or organization name, like <code>dbmdz/bert-base-german-cased</code>.</li> <li>A path to a <em>directory</em> containing vocabulary files required by the tokenizer, for instance saved using the <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.save_pretrained">save_pretrained()</a> method, e.g., <code>./my_model_directory/</code>.</li> <li>(<strong>Deprecated</strong>, not applicable to all derived classes) A path or url to a single saved vocabulary file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g., <code>./my_model_directory/vocab.txt</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.cache_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.cache_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 
0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cache_dir</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014; Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.force_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.force_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to force the (re-)download the vocabulary files and override the cached versions 
if they exist.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.resume_download" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.resume_download"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>resume_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to delete incompletely received files. 
Attempt to resume the download if such a file exists.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.proxies" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.proxies"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014; A dictionary of proxy servers to use by protocol or endpoint, e.g., <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}</code>. 
The proxies are used on each request.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. 
If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.local_files_only" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.local_files_only"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to only rely on local files and not to attempt to download any files.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.revision(str," class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerBase.from_pretrained.revision(str,"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>revision(<code>str</code>,</strong> <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014; The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so <code>revision</code> can be any identifier allowed by git.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.subfolder" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.subfolder"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>subfolder</strong> (<code>str</code>, <em>optional</em>) &#x2014; In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. 
for facebook/rag-token-base), specify it here.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.inputs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>inputs</strong> (additional positional arguments, <em>optional</em>) &#x2014; Will be passed along to the Tokenizer <code>__init__</code> method.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.from_pretrained.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.from_pretrained.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the Tokenizer <code>__init__</code> method. Can be used to set special tokens like <code>bos_token</code>, <code>eos_token</code>, <code>unk_token</code>, <code>sep_token</code>, <code>pad_token</code>, <code>cls_token</code>, <code>mask_token</code>, <code>additional_special_tokens</code>. 
See parameters in the <code>__init__</code> for more details.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Instantiate a <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase">PreTrainedTokenizerBase</a> (or a derived class) from a predefined tokenizer.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Passing <code>use_auth_token=True</code> is required when you want to use a private model.</p></div> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># We can&#x27;t instantiate directly the base class *PreTrainedTokenizerBase* so let&#x27;s 
show our examples on a derived class: BertTokenizer</span> <span class="hljs-comment"># Download vocabulary from huggingface.co and cache.</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) <span class="hljs-comment"># Download vocabulary from huggingface.co (user-uploaded) and cache.</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;dbmdz/bert-base-german-cased&quot;</span>) <span class="hljs-comment"># If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained(&#x27;./test/saved_model/&#x27;)*)</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/&quot;</span>) <span class="hljs-comment"># If the tokenizer uses a single vocabulary file, you can point directly to this file</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;./test/saved_model/my_vocab.txt&quot;</span>) <span class="hljs-comment"># You can link tokens to special vocabulary when instantiating</span> tokenizer = BertTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>, unk_token=<span class="hljs-string">&quot;&lt;unk&gt;&quot;</span>) <span class="hljs-comment"># You should be sure &#x27;&lt;unk&gt;&#x27; is in the vocabulary when doing that.</span> <span class="hljs-comment"># Otherwise use tokenizer.add_special_tokens({&#x27;unk_token&#x27;: &#x27;&lt;unk&gt;&#x27;}) instead)</span> <span class="hljs-keyword">assert</span> tokenizer.unk_token == <span class="hljs-string">&quot;&lt;unk&gt;&quot;</span><!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 
to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_special_tokens_mask</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.get_special_tokens_mask"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3328" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_0<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">token_ids_1<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">already_has_special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A list of integers in the range [0, 1]</span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_0" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_0"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_0</strong> (<code>List[int]</code>) &#x2014; List of ids of the first sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_1" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.get_special_tokens_mask.token_ids_1"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>token_ids_1</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; List of ids of the second sequence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask.already_has_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.get_special_tokens_mask.already_has_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>already_has_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the token list is already formatted with special tokens for the model.<!-- HTML_TAG_END --> </span></span> </li></ul> <div 
class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.get_special_tokens_mask.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A list of integers in the range [0, 1]</p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>1 for a special token, 0 for a sequence token.</p> <!-- HTML_TAG_END --></p></div></div> <p>Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer <code>prepare_for_model</code> or <code>encode_plus</code> methods.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.get_vocab"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 
20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>get_vocab</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.get_vocab" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.get_vocab"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L1539" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Dict[str, int]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" 
id="transformers.PreTrainedTokenizerBase.get_vocab.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Dict[str, int]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The vocabulary.</p> <!-- HTML_TAG_END --></p></div></div> <p>Returns the vocabulary as a dictionary of token to index.</p> <p><code>tokenizer.get_vocab()[token]</code> is equivalent to <code>tokenizer.convert_tokens_to_ids(token)</code> when <code>token</code> is in the vocab.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.pad"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 
12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>pad</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.pad" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.pad"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2703" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">encoded_inputs<span class="opacity-60">: typing.Union[transformers.tokenization_utils_base.BatchEncoding, typing.List[transformers.tokenization_utils_base.BatchEncoding], typing.Dict[str, typing.List[int]], typing.Dict[str, typing.List[typing.List[int]]], typing.List[typing.Dict[str, typing.List[int]]]]</span></span> </span><span class="comma cursor-pointer"><span 
class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.encoded_inputs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.encoded_inputs"><span><svg 
class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>encoded_inputs</strong> (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, list of <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, <code>Dict[str, List[int]]</code>, <code>Dict[str, List[List[int]]</code> or <code>List[Dict[str, List[int]]]</code>) &#x2014; Tokenized inputs. 
Can represent one input (<a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a> or <code>Dict[str, List[int]]</code>) or a batch of tokenized inputs (list of <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding">BatchEncoding</a>, <em>Dict[str, List[List[int]]]</em> or <em>List[Dict[str, List[int]]]</em>) so you can use this method during preprocessing as well as in a PyTorch Dataloader collate function.</p> <p>Instead of <code>List[int]</code> you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a 
href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Select a strategy to pad the returned sequences (according to the model&#x2019;s padding side and padding index) among:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Maximum length of the returned list and optionally padding length (see above).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value.</p> <p>This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability</p> <blockquote> <p>= 7.5 (Volta).</p> </blockquote><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.return_attention_mask" class="header-link block 
pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.pad.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.pad.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length in the batch.</p> <p>Padding side (left/right) padding token ids are defined at the tokenizer level (with <code>self.padding_side</code>, 
<code>self.pad_token_id</code> and <code>self.pad_token_type_id</code>)</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>If the <code>encoded_inputs</code> passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with <code>return_tensors</code>. In the case of PyTorch tensors, you will lose the specific device of your tensors however.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.prepare_for_model"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 
14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prepare_for_model</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.prepare_for_model" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.prepare_for_model"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2904" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ids<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pair_ids<span class="opacity-60">: typing.Optional[typing.List[int]] = 
None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: typing.Union[bool, str, transformers.file_utils.PaddingStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: typing.Union[bool, str, transformers.tokenization_utils_base.TruncationStrategy] = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pad_to_multiple_of<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: typing.Union[str, transformers.file_utils.TensorType, NoneType] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_token_type_ids<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">return_attention_mask<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_overflowing_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_special_tokens_mask<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_offsets_mapping<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_length<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">prepend_batch_axis<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters 
<span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ids</strong> (<code>List[int]</code>) &#x2014; Tokenized input ids of the first sequence. 
Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.pair_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.pair_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pair_ids</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014; Tokenized input ids of the second sequence. 
Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to encode the sequences with the special tokens relative to their model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#transformers.PreTrainedTokenizerBase.prepare_for_model.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a 
href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls truncation. Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length to use by one of the truncation/padding parameters.</p> <p>If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a number along with <code>max_length</code>, the overflowing tokens returned when <code>return_overflowing_tokens=True</code> will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. 
The value of this argument defines the number of overlapping tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.is_split_into_words" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.is_split_into_words"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>is_split_into_words</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not the input is already pre-tokenized (e.g., split into words). If set to <code>True</code>, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. 
This is useful for NER or token classification.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.pad_to_multiple_of" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.pad_to_multiple_of"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_to_multiple_of</strong> (<code>int</code>, <em>optional</em>) &#x2014; If set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability &gt;= 7.5 (Volta).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, <em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. 
Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_token_type_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_token_type_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_token_type_ids</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return token type IDs. 
If left to the default, will return the token type IDs according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_attention_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_attention_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_attention_mask</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer&#x2019;s default, defined by the <code>return_outputs</code> attribute.</p> <p><a href="../glossary#attention-mask">What are attention masks?</a><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_overflowing_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_overflowing_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_overflowing_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return overflowing token sequences. 
If a pair of sequences of input ids (or a batch of pairs) is provided with <code>truncation_strategy = longest_first</code> or <code>True</code>, an error is raised instead of returning overflowing tokens.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_special_tokens_mask" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_special_tokens_mask"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_special_tokens_mask</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return special tokens mask information.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_offsets_mapping" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_offsets_mapping"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_offsets_mapping</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return <code>(char_start, char_end)</code> for each token.</p> <p>This is only available on fast tokenizers inheriting from <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a>, if using Python&#x2019;s tokenizer, this method will raise <code>NotImplementedError</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.return_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.return_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_length</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to return the lengths of the encoded inputs.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_for_model.verbose" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_for_model.verbose"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>verbose</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Whether or not to print more information and warnings. **kwargs &#x2014; passed to the <code>self.tokenize()</code> method<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.prepare_for_model.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li> <p><strong>input_ids</strong> — List of token ids to be fed to a model.</p> <p><a href="../glossary#input-ids">What are input IDs?</a></p> </li> <li> <p><strong>token_type_ids</strong> — List of token type ids to be fed to a model (when <code>return_token_type_ids=True</code> or if <em>“token_type_ids”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#token-type-ids">What are token type IDs?</a></p> </li> <li> <p><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model (when <code>return_attention_mask=True</code> or if <em>“attention_mask”</em> is in <code>self.model_input_names</code>).</p> <p><a href="../glossary#attention-mask">What are attention masks?</a></p> </li> <li> <p><strong>overflowing_tokens</strong> — List of overflowing tokens sequences (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> 
<p><strong>num_truncated_tokens</strong> — Number of tokens truncated (when a <code>max_length</code> is specified and <code>return_overflowing_tokens=True</code>).</p> </li> <li> <p><strong>special_tokens_mask</strong> — List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when <code>add_special_tokens=True</code> and <code>return_special_tokens_mask=True</code>).</p> </li> <li> <p><strong>length</strong> — The length of the inputs (when <code>return_length=True</code>)</p> </li> </ul> <!-- HTML_TAG_END --></p></div></div> <p>Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for <em>pair_ids</em> different than <code>None</code> and <em>truncation_strategy = longest_first</em> or <code>True</code>, it is not possible to return overflowing tokens. 
Such a combination of arguments will raise an error.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>prepare_seq2seq_batch</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" 
width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3438" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">src_texts<span class="opacity-60">: typing.List[str]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">tgt_texts<span class="opacity-60">: typing.Optional[typing.List[str]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">max_target_length<span class="opacity-60">: typing.Optional[int] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white 
dark:hover:bg-white dark:hover:text-black">padding<span class="opacity-60">: str = &#39;longest&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">return_tensors<span class="opacity-60">: str = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation<span class="opacity-60">: bool = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.src_texts" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.src_texts"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 
84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>src_texts</strong> (<code>List[str]</code>) &#x2014; List of documents to summarize or source language texts.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.tgt_texts" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.tgt_texts"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>tgt_texts</strong> (<code>list</code>, <em>optional</em>) &#x2014; List of summaries or target language texts.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base 
!pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length for encoder inputs (documents to summarize or source language texts) If left unset or set to <code>None</code>, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_target_length" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.max_target_length"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>max_target_length</strong> (<code>int</code>, <em>optional</em>) &#x2014; Controls the maximum length of decoder inputs (target language texts or summaries) If left unset or set to <code>None</code>, this will use the max_length value.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.padding" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.padding"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>padding</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.file_utils.PaddingStrategy">PaddingStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Activates and controls padding. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest&apos;</code>: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided).</li> <li><code>&apos;max_length&apos;</code>: Pad to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided.</li> <li><code>False</code> or <code>&apos;do_not_pad&apos;</code> (default): No padding (i.e., can output a batch with sequences of different lengths).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.return_tensors" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.return_tensors"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>return_tensors</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/file_utils#transformers.TensorType">TensorType</a>, 
<em>optional</em>) &#x2014; If set, will return tensors instead of list of python integers. Acceptable values are:</p> <ul> <li><code>&apos;tf&apos;</code>: Return TensorFlow <code>tf.constant</code> objects.</li> <li><code>&apos;pt&apos;</code>: Return PyTorch <code>torch.Tensor</code> objects.</li> <li><code>&apos;np&apos;</code>: Return Numpy <code>np.ndarray</code> objects.</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.truncation" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.truncation"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation</strong> (<code>bool</code>, <code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>True</code>) &#x2014; Activates and controls truncation. 
Accepts the following values:</p> <ul> <li><code>True</code> or <code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>False</code> or <code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). 
**kwargs &#x2014; Additional keyword arguments passed along to <code>self.__call__</code>.</li> </ul><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.prepare_seq2seq_batch.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>A <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.BatchEncoding" >BatchEncoding</a> with the following fields:</p> <ul> <li><strong>input_ids</strong> — List of token ids to be fed to the encoder.</li> <li><strong>attention_mask</strong> — List of indices specifying which tokens should be attended to by the model.</li> <li><strong>labels</strong> — List of token ids for tgt_texts.</li> </ul> <p>The full set of keys <code>[input_ids, attention_mask, labels]</code>, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys.</p> <!-- HTML_TAG_END --></p></div></div> <p>Prepare model inputs for translation. 
For best performance, translate one sentence at a time.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.file_utils.PushToHubMixin.push_to_hub"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>push_to_hub</span></h4><!-- HTML_TAG_END --> <a id="transformers.file_utils.PushToHubMixin.push_to_hub" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.file_utils.PushToHubMixin.push_to_hub"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/file_utils.py#L2842" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_path_or_name<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">repo_url<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_temp_dir<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">commit_message<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">organization<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">private<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">use_auth_token<span class="opacity-60">: typing.Union[bool, str, NoneType] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**model_card_kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_path_or_name"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>repo_path_or_name</strong> (<code>str</code>, <em>optional</em>) &#x2014; Can either be a repository name for your tokenizer in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by <code>repo_url</code> and a local directory with that name will be created.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.repo_url" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.repo_url"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> 
<span><!-- HTML_TAG_START --><strong>repo_url</strong> (<code>str</code>, <em>optional</em>) &#x2014; Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an <code>organization</code>) with <code>repo_name</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_temp_dir"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_temp_dir</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to clone the distant repo in a temporary directory or in <code>repo_path_or_name</code> inside the current working directory. 
This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.commit_message" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.commit_message"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>commit_message</strong> (<code>str</code>, <em>optional</em>) &#x2014; Message to commit while pushing. 
Will default to <code>&quot;add tokenizer&quot;</code>.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.organization" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.organization"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>organization</strong> (<code>str</code>, <em>optional</em>) &#x2014; Organization in which you want to push your tokenizer (you must be a member of this organization).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.private" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.private"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>private</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Whether or not the repository created should be private (requires a paying subscription).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.file_utils.PushToHubMixin.push_to_hub.use_auth_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>use_auth_token</strong> (<code>bool</code> or <code>str</code>, <em>optional</em>) &#x2014; The token to use as HTTP bearer authorization for remote files. If <code>True</code>, will use the token generated when running <code>transformers-cli login</code> (stored in <code>~/.huggingface</code>). Will default to <code>True</code> if <code>repo_url</code> is not specified.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.file_utils.PushToHubMixin.push_to_hub.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The url of the commit of your tokenizer in the given repository.</p> <!-- HTML_TAG_END --></p></div></div> <p>Upload the tokenizer files to the 🤗 Model Hub while synchronizing a local clone of the repo in <code>repo_path_or_name</code>.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>) <span class="hljs-comment"># Push the tokenizer to your namespace with the name &quot;my-finetuned-bert&quot; with no local clone.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, use_temp_dir=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Push the tokenizer to an organization with the name &quot;my-finetuned-bert&quot; and have a local clone in the</span> <span class="hljs-comment"># *my-finetuned-bert* folder.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, organization=<span class="hljs-string">&quot;huggingface&quot;</span>) <span class="hljs-comment"># Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.</span> tokenizer.push_to_hub(<span class="hljs-string">&quot;my-finetuned-bert&quot;</span>, repo_url=<span class="hljs-string">&quot;https://huggingface.co/sgugger/my-finetuned-bert&quot;</span>)<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group 
flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.register_for_auto_class"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>register_for_auto_class</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.register_for_auto_class" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.register_for_auto_class"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3412" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">auto_class<span class="opacity-60"> = &#39;AutoTokenizer&#39;</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.register_for_auto_class.auto_class" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.register_for_auto_class.auto_class"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>auto_class</strong> (<code>str</code> or <code>type</code>, <em>optional</em>, defaults to <code>&quot;AutoTokenizer&quot;</code>) &#x2014; The auto class to register this new tokenizer with.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the library are already mapped with <code>AutoTokenizer</code>.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>This API is experimental and may have some slight breaking changes in the next releases.</p></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.save_pretrained"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path 
d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_pretrained</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.save_pretrained" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.save_pretrained"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a 
class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L1990" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: typing.Union[str, os.PathLike]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">legacy_format<span class="opacity-60">: typing.Optional[bool] = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">push_to_hub<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span>A tuple of <code>str</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a 
id="transformers.PreTrainedTokenizerBase.save_pretrained.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.save_pretrained.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) &#x2014; The path to a directory where the tokenizer will be saved.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.save_pretrained.legacy_format" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.save_pretrained.legacy_format"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 
67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>legacy_format</strong> (<code>bool</code>, <em>optional</em>) &#x2014; Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a separate added_tokens files.</p> <p>If <code>False</code>, will only save the tokenizer in the unified JSON format. This format is incompatible with &#x201C;slow&#x201D; tokenizers (not powered by the <em>tokenizers</em> library), so the tokenizer will not be able to be loaded in the corresponding &#x201C;slow&#x201D; tokenizer.</p> <p>If <code>True</code>, will save the tokenizer in legacy format. If the &#x201C;slow&#x201D; tokenizer doesn&#x2019;t exits, a value error is raised. 
filename_prefix &#x2014; (<code>str</code>, <em>optional</em>): A prefix to add to the names of the files saved by the tokenizer.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.save_pretrained.push_to_hub" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.save_pretrained.push_to_hub"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to push your model to the Hugging Face model hub after saving it.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p>Using <code>push_to_hub=True</code> will synchronize the repository you are pushing to with <code>save_directory</code>, which requires <code>save_directory</code> to be 
a local clone of the repo you are pushing to if it&#x2019;s an existing folder. Pass along <code>temp_dir=True</code> to use a temporary directory instead.</p> </div><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.save_pretrained.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p>A tuple of <code>str</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The files saved.</p> <!-- HTML_TAG_END --></p></div></div> <p>Save the full tokenizer state.</p> <p>This method make sure the full tokenizer can then be re-loaded using the <code>from_pretrained</code> class method..</p> <p>Warning,None This won’t save modifications you may have applied to the tokenizer after the instantiation (for instance, modifying <code>tokenizer.do_lower_case</code> after creation).</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.save_vocabulary"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 
12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>save_vocabulary</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.save_vocabulary" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.save_vocabulary"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2151" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma 
cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">save_directory<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">filename_prefix<span class="opacity-60">: typing.Optional[str] = None</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Tuple(str)</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.save_vocabulary.save_directory" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.save_vocabulary.save_directory"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>save_directory</strong> (<code>str</code>) &#x2014; The directory in which to save the vocabulary.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.save_vocabulary.filename_prefix" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.save_vocabulary.filename_prefix"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>filename_prefix</strong> (<code>str</code>, <em>optional</em>) &#x2014; An optional prefix to add to the named of the saved files.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.save_vocabulary.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Tuple(str)</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- 
HTML_TAG_START --> <p>Paths to the files saved.</p> <!-- HTML_TAG_END --></p></div></div> <p>Save only the vocabulary of the tokenizer (vocabulary + added tokens).</p> <p>This method won’t save the configuration and special token mappings of the tokenizer. Use <code>_save_pretrained()</code>to save the whole state of the tokenizer.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.tokenize"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>tokenize</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.tokenize" class="header-link 
invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.tokenize"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L2169" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">text<span class="opacity-60">: str</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">pair<span class="opacity-60">: typing.Optional[str] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">add_special_tokens<span class="opacity-60">: bool = False</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white 
dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>List[str]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.tokenize.text" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.tokenize.text"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>text</strong> (<code>str</code>) &#x2014; The sequence to be encoded.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.tokenize.pair" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.tokenize.pair"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pair</strong> (<code>str</code>, <em>optional</em>) &#x2014; A second sequence to be encoded with the first.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.tokenize.add_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.tokenize.add_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>add_special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Whether or not to add the special tokens associated with the corresponding model.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.tokenize.kwargs" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.tokenize.kwargs"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>kwargs</strong> (additional keyword arguments, <em>optional</em>) &#x2014; Will be passed to the underlying model specific encode method. 
See details in <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__"><strong>call</strong>()</a><!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.tokenize.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>List[str]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The list of tokens.</p> <!-- HTML_TAG_END --></p></div></div> <p>Converts a string in a sequence of tokens, replacing unknown tokens with the <code>unk_token</code>.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.PreTrainedTokenizerBase.truncate_sequences"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 
21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>truncate_sequences</span></h4><!-- HTML_TAG_END --> <a id="transformers.PreTrainedTokenizerBase.truncate_sequences" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.PreTrainedTokenizerBase.truncate_sequences"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L3040" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">ids<span class="opacity-60">: typing.List[int]</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black 
hover:text-white dark:hover:bg-white dark:hover:text-black">pair_ids<span class="opacity-60">: typing.Optional[typing.List[int]] = None</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">num_tokens_to_remove<span class="opacity-60">: int = 0</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">truncation_strategy<span class="opacity-60">: typing.Union[str, transformers.tokenization_utils_base.TruncationStrategy] = &#39;longest_first&#39;</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">stride<span class="opacity-60">: int = 0</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>Tuple[List[int], List[int], List[int]]</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncate_sequences.ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.truncate_sequences.ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>ids</strong> (<code>List[int]</code>) &#x2014; Tokenized input ids of the first sequence. Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncate_sequences.pair_ids" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.truncate_sequences.pair_ids"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pair_ids</strong> 
(<code>List[int]</code>, <em>optional</em>) &#x2014; Tokenized input ids of the second sequence. Can be obtained from a string by chaining the <code>tokenize</code> and <code>convert_tokens_to_ids</code> methods.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncate_sequences.num_tokens_to_remove" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.truncate_sequences.num_tokens_to_remove"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>num_tokens_to_remove</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; Number of tokens to remove using the truncation strategy.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncate_sequences.truncation_strategy" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.truncate_sequences.truncation_strategy"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>truncation_strategy</strong> (<code>str</code> or <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy">TruncationStrategy</a>, <em>optional</em>, defaults to <code>False</code>) &#x2014; The strategy to follow for truncation. Can be:</p> <ul> <li><code>&apos;longest_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_first&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;only_second&apos;</code>: Truncate to a maximum length specified with the argument <code>max_length</code> or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.</li> <li><code>&apos;do_not_truncate&apos;</code> (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size).</li> </ul><!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.PreTrainedTokenizerBase.truncate_sequences.stride" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.PreTrainedTokenizerBase.truncate_sequences.stride"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>stride</strong> (<code>int</code>, <em>optional</em>, defaults to 0) &#x2014; If set to a positive number, the 
overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.PreTrainedTokenizerBase.truncate_sequences.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>Tuple[List[int], List[int], List[int]]</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The truncated <code>ids</code>, the truncated <code>pair_ids</code> and the list of overflowing tokens. Note: The <em>longest_first</em> strategy returns empty list of overflowing tokens if a pair of sequences (or a batch of pairs) is provided.</p> <!-- HTML_TAG_END --></p></div></div> <p>Truncates a sequence pair in-place following the strategy.</p></div></div> <h2 class="relative group"><a id="transformers.SpecialTokensMixin" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>SpecialTokensMixin </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpecialTokensMixin"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">SpecialTokensMixin</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.SpecialTokensMixin" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpecialTokensMixin"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L753" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">verbose<span class="opacity-60"> = True</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">**kwargs<span class="opacity-60"></span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.bos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.bos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path 
d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>bos_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the beginning of a sentence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.eos_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.eos_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>eos_token</strong> (<code>str</code> or 
<code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the end of a sentence.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.unk_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.unk_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>unk_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing an out-of-vocabulary token.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.sep_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.sep_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>sep_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token separating two different sentences in the same input (used by BERT for instance).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.pad_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.pad_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>pad_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.cls_token" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.cls_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>cls_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing the class of the input (used by BERT for instance).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.mask_token" class="header-link block pr-0.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.mask_token"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>mask_token</strong> (<code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT).<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.additional_special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.additional_special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>additional_special_tokens</strong> (tuple or list of <code>str</code> or <code>tokenizers.AddedToken</code>, <em>optional</em>) &#x2014; A tuple or a list of additional special tokens.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>A mixin derived by <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizer">PreTrainedTokenizer</a> and <a href="/docs/transformers/pr_16143/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast">PreTrainedTokenizerFast</a> to handle specific behaviors related to special tokens. 
In particular, this class hold the attributes which can be used to directly access these special tokens in a model-independent manner and allow to set and update the special tokens.</p> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpecialTokensMixin.add_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>add_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.SpecialTokensMixin.add_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpecialTokensMixin.add_special_tokens"><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L833" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens_dict<span class="opacity-60">: typing.Dict[str, typing.Union[str, tokenizers.AddedToken]]</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.add_special_tokens.special_tokens_dict" class="header-link 
block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.add_special_tokens.special_tokens_dict"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>special_tokens_dict</strong> (dictionary <em>str</em> to <em>str</em> or <code>tokenizers.AddedToken</code>) &#x2014; Keys should be in the list of predefined special attributes: [<code>bos_token</code>, <code>eos_token</code>, <code>unk_token</code>, <code>sep_token</code>, <code>pad_token</code>, <code>cls_token</code>, <code>mask_token</code>, <code>additional_special_tokens</code>].</p> <p>Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the <code>unk_token</code> to them).<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.SpecialTokensMixin.add_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p 
class="text-base"><!-- HTML_TAG_START --> <p>Number of tokens added to the vocabulary.</p> <!-- HTML_TAG_END --></p></div></div> <p>Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary).</p> <p>Note,None When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.</p> <p>In order to do that, please use the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.resize_token_embeddings">resize_token_embeddings()</a> method.</p> <p>Using <code>add_special_tokens</code> will ensure your special tokens can be used in several ways:</p> <ul><li>Special tokens are carefully handled by the tokenizer (they are never split).</li> <li>You can easily refer to special tokens using tokenizer class attributes like <code>tokenizer.cls_token</code>. 
This makes it easy to develop model-agnostic training and fine-tuning scripts.</li></ul> <p>When possible, special tokens are already registered for provided pretrained models (for instance <a href="/docs/transformers/pr_16143/en/model_doc/bert#transformers.BertTokenizer">BertTokenizer</a> <code>cls_token</code> is already registered to be :obj<em>’[CLS]’</em> and XLM’s one is also registered to be <code>&#39;&lt;/s&gt;&#39;</code>).</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># Let&#x27;s see how to add a new classification token to GPT-2</span> tokenizer = GPT2Tokenizer.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) model = GPT2Model.from_pretrained(<span class="hljs-string">&quot;gpt2&quot;</span>) special_tokens_dict = {<span 
class="hljs-string">&quot;cls_token&quot;</span>: <span class="hljs-string">&quot;&lt;CLS&gt;&quot;</span>} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;We have added&quot;</span>, num_added_toks, <span class="hljs-string">&quot;tokens&quot;</span>) <span class="hljs-comment"># Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.</span> model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer)) <span class="hljs-keyword">assert</span> tokenizer.cls_token == <span class="hljs-string">&quot;&lt;CLS&gt;&quot;</span><!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpecialTokensMixin.add_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 
7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>add_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.SpecialTokensMixin.add_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpecialTokensMixin.add_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L905" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">new_tokens<span class="opacity-60">: typing.Union[str, tokenizers.AddedToken, typing.List[typing.Union[str, tokenizers.AddedToken]]]</span></span> </span><span class="comma cursor-pointer"><span class="rounded 
hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">special_tokens<span class="opacity-60">: bool = False</span></span> </span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.add_tokens.new_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.add_tokens.new_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>new_tokens</strong> (<code>str</code>, <code>tokenizers.AddedToken</code> or a list of <em>str</em> or <code>tokenizers.AddedToken</code>) &#x2014; Tokens are only added if they are not already in the vocabulary. 
<code>tokenizers.AddedToken</code> wraps a string token to let you personalize its behavior: whether this token should only match against a single word, whether this token should strip all potential whitespaces on the left side, whether this token should strip all potential whitespaces on the right side, etc.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.SpecialTokensMixin.add_tokens.special_tokens" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.SpecialTokensMixin.add_tokens.special_tokens"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>special_tokens</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014; Can be used to specify if the token is a special token. 
This mostly change the normalization behavior (special tokens like CLS or [MASK] are usually not lower-cased for instance).</p> <p>See details for <code>tokenizers.AddedToken</code> in HuggingFace tokenizers library.<!-- HTML_TAG_END --> </span></span> </li></ul> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.SpecialTokensMixin.add_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>Number of tokens added to the vocabulary.</p> <!-- HTML_TAG_END --></p></div></div> <p>Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary.</p> <p>Note,None When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer.</p> <p>In order to do that, please use the <a href="/docs/transformers/pr_16143/en/main_classes/model#transformers.PreTrainedModel.resize_token_embeddings">resize_token_embeddings()</a> method.</p> <p>Examples:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-comment"># Let&#x27;s see how to increase the vocabulary of Bert model and tokenizer</span> tokenizer = BertTokenizerFast.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) model = BertModel.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>) num_added_toks = tokenizer.add_tokens([<span class="hljs-string">&quot;new_tok1&quot;</span>, <span class="hljs-string">&quot;my_new-tok2&quot;</span>]) <span class="hljs-built_in">print</span>(<span class="hljs-string">&quot;We have added&quot;</span>, num_added_toks, <span class="hljs-string">&quot;tokens&quot;</span>) <span class="hljs-comment"># Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.</span> model.resize_token_embeddings(<span class="hljs-built_in">len</span>(tokenizer))<!-- HTML_TAG_END --></pre></div></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.SpecialTokensMixin.sanitize_special_tokens"><!-- HTML_TAG_START --><h4 class="!m-0"><span class="flex-1 rounded-xl py-0.5 break-all bg-gradient-to-r from-blue-50/60 to-white dark:from-gray-900 dark:to-gray-950 text-blue-700 dark:text-blue-300 font-medium px-2"><svg width="1em" height="1em" viewBox="0 0 32 33" class="mr-1 inline-block -mt-0.5" 
xmlns="http://www.w3.org/2000/svg"><path d="M5.80566 18.3545C4.90766 17.4565 4.90766 16.0005 5.80566 15.1025L14.3768 6.53142C15.2748 5.63342 16.7307 5.63342 17.6287 6.53142L26.1999 15.1025C27.0979 16.0005 27.0979 17.4565 26.1999 18.3545L17.6287 26.9256C16.7307 27.8236 15.2748 27.8236 14.3768 26.9256L5.80566 18.3545Z" fill="currentColor" fill-opacity="0.25"/><path fill-rule="evenodd" clip-rule="evenodd" d="M16.4801 13.9619C16.4801 12.9761 16.7467 12.5436 16.9443 12.3296C17.1764 12.078 17.5731 11.8517 18.2275 11.707C18.8821 11.5623 19.638 11.5342 20.4038 11.5582C20.7804 11.57 21.1341 11.5932 21.4719 11.6156L21.5263 11.6193C21.8195 11.6389 22.1626 11.6618 22.4429 11.6618V7.40825C22.3209 7.40825 22.1219 7.39596 21.7544 7.37149C21.4202 7.34925 20.9976 7.32115 20.5371 7.30672C19.6286 7.27824 18.4672 7.29779 17.3093 7.55377C16.1512 7.8098 14.8404 8.33724 13.8181 9.4452C12.7612 10.5907 12.2266 12.1236 12.2266 13.9619V15.0127H10.6836V19.2662H12.2266V26.6332H16.4801V19.2662H20.3394V15.0127H16.4801V13.9619Z" fill="currentColor"/></svg>sanitize_special_tokens</span></h4><!-- HTML_TAG_END --> <a id="transformers.SpecialTokensMixin.sanitize_special_tokens" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.SpecialTokensMixin.sanitize_special_tokens"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 
0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L821" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span>)</span> <span class="font-bold">→</span> <span class="rounded hover:bg-gray-400 cursor-pointer"><!-- HTML_TAG_START --><span><code>int</code></span><!-- HTML_TAG_END --></span></p> <div class="!mb-10 relative docstring-details "> <div class="flex items-center font-semibold space-x-3 text-base !mt-0 !mb-0 text-gray-800" id="transformers.SpecialTokensMixin.sanitize_special_tokens.returns"><p class="text-base">Returns</p> <!-- HTML_TAG_START --> <p><code>int</code></p> <!-- HTML_TAG_END --> <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700"></span></div> <p class="text-base"><!-- HTML_TAG_START --> <p>The number of tokens added in the vocabulary during the operation.</p> <!-- HTML_TAG_END --></p></div></div> <p>Make sure that all the special tokens attributes of the tokenizer (<code>tokenizer.mask_token</code>, <code>tokenizer.cls_token</code>, etc.) 
are in the vocabulary.</p> <p>Add the missing ones to the vocabulary if needed.</p></div></div> <h2 class="relative group"><a id="transformers.tokenization_utils_base.TruncationStrategy" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.tokenization_utils_base.TruncationStrategy"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Enums and namedtuples </span></h2> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.tokenization_utils_base.TruncationStrategy"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 
0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.tokenization_utils_base.</span><span class="font-semibold">TruncationStrategy</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.tokenization_utils_base.TruncationStrategy" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.tokenization_utils_base.TruncationStrategy"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L129" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed 
!my-6"><span>(</span> <span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">value<span class="opacity-60"></span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">names<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">module<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">qualname<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">type<span class="opacity-60"> = None</span></span> </span><span class="comma cursor-default"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60"> = 1</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> </div></div> <p>Possible values for the <code>truncation</code> argument in <a href="/docs/transformers/pr_16143/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.__call__">PreTrainedTokenizerBase.<strong>call</strong>()</a>. 
Useful for tab-completion in an IDE.</p></div> <div class="docstring"><div><span class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.CharSpan"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">CharSpan</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.CharSpan" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.CharSpan"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 
79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L141" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.CharSpan.start" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.CharSpan.start"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start</strong> (<code>int</code>) &#x2014; Index of the first character in the original string.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.CharSpan.end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.CharSpan.end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end</strong> (<code>int</code>) &#x2014; Index of the character following the last character in the original string.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Character span in the original string.</p></div> <div class="docstring"><div><span 
class="group flex space-x-1.5 items-center text-gray-800 bg-gradient-to-r rounded-tr-lg -mt-4 -ml-4 pt-3 px-2.5" id="transformers.TokenSpan"><!-- HTML_TAG_START --><h3 class="!m-0"><span class="flex-1 break-all md:text-lg bg-gradient-to-r px-2.5 py-1.5 rounded-xl from-indigo-50/70 to-white dark:from-gray-900 dark:to-gray-950 dark:text-indigo-300 text-indigo-700"><svg class="mr-1.5 text-indigo-500 inline-block -mt-0.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width=".8em" height=".8em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg><span class="font-light">class</span> <span class="font-medium">transformers.</span><span class="font-semibold">TokenSpan</span></span></h3><!-- HTML_TAG_END --> <a id="transformers.TokenSpan" class="header-link invisible with-hover:group-hover:visible pr-2" href="#transformers.TokenSpan"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></a> <a class="!ml-auto !text-gray-400 !no-underline text-sm flex items-center" href="https://github.com/huggingface/transformers/blob/pr_16143/src/transformers/tokenization_utils_base.py#L154" target="_blank"><span>&lt;</span> <span class="hidden md:block mx-0.5 hover:!underline">source</span> <span>&gt;</span></a></span> <p class="font-mono text-xs md:text-sm !leading-relaxed !my-6"><span>(</span> <span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">start<span class="opacity-60">: int</span></span> </span><span class="comma cursor-pointer"><span class="rounded hover:bg-black hover:text-white dark:hover:bg-white dark:hover:text-black">end<span class="opacity-60">: int</span></span> </span> <span>)</span> </p> <div class="!mb-10 relative docstring-details "> <p class="flex items-center font-semibold !mt-2 !mb-2 text-gray-800">Parameters <span class="flex-auto border-t-2 border-gray-100 dark:border-gray-700 ml-3"></span></p> <ul class="px-2"><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenSpan.start" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenSpan.start"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>start</strong> (<code>int</code>) &#x2014; Index of the first token in the span.<!-- HTML_TAG_END --> </span></span> </li><li class="text-base !pl-4 my-3"><span class="group flex space-x-1.5 items-start"><a id="transformers.TokenSpan.end" class="header-link block pr-0.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers.TokenSpan.end"><span><svg class="text-smd" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span><!-- HTML_TAG_START --><strong>end</strong> (<code>int</code>) &#x2014; Index of the token following the last token in the span.<!-- HTML_TAG_END --> </span></span> </li></ul> </div></div> <p>Token span in an encoded string (list of tokens).</p></div> <script type="module" data-hydrate="1wpsj13"> import { start } from 
"/docs/transformers/pr_16143/en/_app/start-7cd6c1d0.js"; start({ target: document.querySelector('[data-hydrate="1wpsj13"]').parentNode, paths: {"base":"/docs/transformers/pr_16143/en","assets":"/docs/transformers/pr_16143/en"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_16143/en/_app/pages/__layout.svelte-52826f4d.js"), import("/docs/transformers/pr_16143/en/_app/pages/internal/tokenization_utils.mdx-eeb10b9c.js") ], params: {} } }); </script>
455
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/installation.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;guia-de-instalao&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;instalao-pelo-pip&quot;,&quot;title&quot;:&quot;Instalação pelo Pip&quot;},{&quot;local&quot;:&quot;instalao-usando-a-fonte&quot;,&quot;title&quot;:&quot;Instalação usando a fonte&quot;},{&quot;local&quot;:&quot;instalao-editvel&quot;,&quot;title&quot;:&quot;Instalação editável&quot;},{&quot;local&quot;:&quot;instalao-usando-o-conda&quot;,&quot;title&quot;:&quot;Instalação usando o Conda&quot;},{&quot;local&quot;:&quot;configurao-do-cach&quot;,&quot;title&quot;:&quot;Configuração do Cachê&quot;},{&quot;local&quot;:&quot;modo-offline&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;obtendo-modelos-e-tokenizers-para-uso-offline&quot;,&quot;title&quot;:&quot;Obtendo modelos e tokenizers para uso offline&quot;}],&quot;title&quot;:&quot;Modo Offline&quot;}],&quot;title&quot;:&quot;Guia de Instalação&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/installation.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" 
href="/docs/transformers/pr_18789/pt/_app/chunks/CodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="guia-de-instalao" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#guia-de-instalao"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Guia de Instalação </span></h1> <p>Neste guia poderá encontrar informações para a instalação do 🤗 Transformers para qualquer biblioteca de Machine Learning com a qual esteja a trabalhar. Além disso, poderá encontrar informações sobre como gerar cachês e configurar o 🤗 Transformers para execução em modo offline (opcional).</p> <p>🤗 Transformers foi testado com Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, e Flax. 
Para instalar a biblioteca de deep learning com que deseja trabalhar, siga as instruções correspondentes listadas a seguir:</p> <ul><li><a href="https://pytorch.org/get-started/locally/" rel="nofollow">PyTorch</a></li> <li><a href="https://www.tensorflow.org/install/pip" rel="nofollow">TensorFlow 2.0</a></li> <li><a href="https://flax.readthedocs.io/en/latest/" rel="nofollow">Flax</a></li></ul> <h2 class="relative group"><a id="instalao-pelo-pip" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#instalao-pelo-pip"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Instalação pelo Pip </span></h2> <p>É sugerido instalar o 🤗 Transformers num <a href="https://docs.python.org/3/library/venv.html" rel="nofollow">ambiente virtual</a>. Se precisar de mais informações sobre ambientes virtuais em Python, consulte este <a href="https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/" rel="nofollow">guia</a>. 
Um ambiente virtual facilitará a manipulação e organização de projetos e evita problemas de compatibilidade entre dependências.</p> <p>Comece criando um ambiente virtual no diretório do seu projeto:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m venv .<span class="hljs-built_in">env</span><!-- HTML_TAG_END --></pre></div> <p>E para ativar o ambiente virtual:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">source</span> .<span class="hljs-built_in">env</span>/bin/activate<!-- HTML_TAG_END --></pre></div> <p>Agora É possível instalar o 🤗 Transformers com o comando a seguir:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers<!-- HTML_TAG_END --></pre></div> <p>Somente para a CPU, é possível instalar o 🤗 Transformers e a biblioteca de deep learning respectiva apenas numa linha.</p> <p>Por exemplo, para instalar o 🤗 Transformers e o PyTorch, digite:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[torch]<!-- HTML_TAG_END --></pre></div> <p>🤗 Transformers e TensorFlow 2.0:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[tf-cpu]<!-- HTML_TAG_END --></pre></div> <p>🤗 Transformers e Flax:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform 
-translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[flax]<!-- HTML_TAG_END --></pre></div> <p>Por último, verifique se o 🤗 Transformers foi instalado com sucesso usando o seguinte comando para baixar um modelo pré-treinado:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -c <span class="hljs-string">&quot;from transformers import pipeline; print(pipeline(&#x27;sentiment-analysis&#x27;)(&#x27;we love you&#x27;))&quot;</span><!-- HTML_TAG_END --></pre></div> <p>Em seguida, imprima um rótulo e sua pontuação:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 
cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->[{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: 0.9998704791069031}]<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="instalao-usando-a-fonte" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#instalao-usando-a-fonte"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Instalação usando a fonte </span></h2> <p>Para instalar o 🤗 Transformers a partir da fonte use o seguinte comando:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install git+https://github.com/huggingface/transformers<!-- HTML_TAG_END --></pre></div> <p>O comando acima instalará a versão <code>master</code> mais atual em vez da última versão estável. A versão <code>master</code> é útil para utilizar os últimos updates contidos em 🤗 Transformers. 
Por exemplo, um erro recente pode ter sido corrigido somente após a última versão estável, antes que houvesse um novo lançamento. No entanto, há a possibilidade que a versão <code>master</code> não esteja estável. A equipa trata de mantér a versão <code>master</code> operacional e a maioria dos erros são resolvidos em poucas horas ou dias. Se encontrar quaisquer problemas, por favor abra um <a href="https://github.com/huggingface/transformers/issues" rel="nofollow">Issue</a> para que o mesmo possa ser corrigido o mais rápido possível.</p> <p>Verifique que o 🤗 Transformers está instalado corretamente usando o seguinte comando:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -c <span class="hljs-string">&quot;from transformers import pipeline; print(pipeline(&#x27;sentiment-analysis&#x27;)(&#x27;I love 
you&#x27;))&quot;</span><!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="instalao-editvel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#instalao-editvel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Instalação editável </span></h2> <p>Uma instalação editável será necessária caso desejas um dos seguintes:</p> <ul><li>Usar a versão <code>master</code> do código fonte.</li> <li>Contribuir ao 🤗 Transformers e precisa testar mudanças ao código.</li></ul> <p>Para tal, clone o repositório e instale o 🤗 Transformers com os seguintes comandos:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->git <span class="hljs-built_in">clone</span> https://github.com/huggingface/transformers.git <span class="hljs-built_in">cd</span> transformers pip install -e .<!-- HTML_TAG_END --></pre></div> <p>Estes comandos vão ligar o diretório para o qual foi clonado o repositório ao caminho de bibliotecas do Python. O Python agora buscará dentro dos arquivos que foram clonados além dos caminhos normais da biblioteca. 
Por exemplo, se os pacotes do Python se encontram instalados no caminho <code>~/anaconda3/envs/main/lib/python3.7/site-packages/</code>, o Python também buscará módulos no diretório onde clonamos o repositório <code>~/transformers/</code>.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>É necessário manter o diretório <code>transformers</code> se desejas continuar usando a biblioteca.</p></div> <p>Assim, É possível atualizar sua cópia local para com a última versão do 🤗 Transformers com o seguinte comando:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">cd</span> ~/transformers/ git pull<!-- 
HTML_TAG_END --></pre></div> <p>O ambiente de Python que foi criado para a instalação do 🤗 Transformers encontrará a versão <code>master</code> em execuções seguintes.</p> <h2 class="relative group"><a id="instalao-usando-o-conda" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#instalao-usando-o-conda"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Instalação usando o Conda </span></h2> <p>É possível instalar o 🤗 Transformers a partir do canal conda <code>huggingface</code> com o seguinte comando:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path 
d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->conda install -c huggingface transformers<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="configurao-do-cach" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#configurao-do-cach"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Configuração do Cachê </span></h2> <p>Os modelos pré-treinados são baixados e armazenados no cachê local, encontrado em <code>~/.cache/huggingface/transformers/</code>. Este é o diretório padrão determinado pela variável <code>TRANSFORMERS_CACHE</code> dentro do shell. 
No Windows, este diretório pré-definido é dado por <code>C:\Users\username\.cache\huggingface\transformers</code>. É possível mudar as variáveis dentro do shell em ordem de prioridade para especificar um diretório de cachê diferente:</p> <ol><li>Variável de ambiente do shell (por padrão): <code>TRANSFORMERS_CACHE</code>.</li> <li>Variável de ambiente do shell:<code>HF_HOME</code> + <code>transformers/</code>.</li> <li>Variável de ambiente do shell: <code>XDG_CACHE_HOME</code> + <code>/huggingface/transformers</code>.</li></ol> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>O 🤗 Transformers usará as variáveis de ambiente do shell <code>PYTORCH_TRANSFORMERS_CACHE</code> ou <code>PYTORCH_PRETRAINED_BERT_CACHE</code> se estiver vindo de uma versão anterior da biblioteca que tenha configurado essas variáveis de ambiente, a menos que você especifique a variável de ambiente do shell <code>TRANSFORMERS_CACHE</code>.</p></div> <h2 class="relative group"><a id="modo-offline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#modo-offline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 
56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Modo Offline </span></h2> <p>O 🤗 Transformers também pode ser executado num ambiente de firewall ou fora da rede (offline) usando arquivos locais. Para tal, configure a variável de ambiente de modo que <code>TRANSFORMERS_OFFLINE=1</code>.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Você pode adicionar o <a href="https://huggingface.co/docs/datasets/" rel="nofollow">🤗 Datasets</a> ao pipeline de treinamento offline declarando a variável de ambiente <code>HF_DATASETS_OFFLINE=1</code>.</p></div> <p>Segue um exemplo de execução do programa numa rede padrão com firewall para instâncias externas, usando o seguinte comando:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 
border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...<!-- HTML_TAG_END --></pre></div> <p>Execute esse mesmo programa numa instância offline com o seguinte comando:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...<!-- HTML_TAG_END --></pre></div> <p>O script agora deve ser executado sem travar ou expirar, pois procurará apenas por arquivos locais.</p> <h3 class="relative group"><a 
id="obtendo-modelos-e-tokenizers-para-uso-offline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#obtendo-modelos-e-tokenizers-para-uso-offline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Obtendo modelos e tokenizers para uso offline </span></h3> <p>Outra opção para usar o 🤗 Transformers offline é baixar os arquivos antes e depois apontar para o caminho local onde estão localizados. 
Existem três maneiras de fazer isso:</p> <ul><li><p>Baixe um arquivo por meio da interface de usuário do <a href="https://huggingface.co/models" rel="nofollow">Model Hub</a> clicando no ícone ↓.</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png" alt="download-icon"></p></li></ul> <ul><li><p>Use o pipeline do <code>PreTrainedModel.from_pretrained()</code> e <code>PreTrainedModel.save_pretrained()</code>:</p> <ol><li><p>Baixa os arquivos previamente com <code>PreTrainedModel.from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = 
AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>)<!-- HTML_TAG_END --></pre></div></li></ol></li></ul> <ol start="2"><li><p>Salve os arquivos em um diretório específico com <code>PreTrainedModel.save_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)<!-- HTML_TAG_END --></pre></div></li> <li><p>Quando estiver offline, acesse os arquivos com 
<code>PreTrainedModel.from_pretrained()</code> do diretório especificado:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)<!-- HTML_TAG_END --></pre></div></li></ol> <ul><li><p>Baixando arquivos programaticamente com a biblioteca <a href="https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub" rel="nofollow">huggingface_hub</a>:</p> <ol><li><p>Instale a biblioteca <a href="https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub" rel="nofollow">huggingface_hub</a> em seu ambiente virtual:</p> 
<div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m pip install huggingface_hub<!-- HTML_TAG_END --></pre></div></li> <li><p>Utiliza a função <a href="https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub" rel="nofollow"><code>hf_hub_download</code></a> para baixar um arquivo para um caminho específico. 
Por exemplo, o comando a seguir baixará o arquivo <code>config.json</code> para o modelo <a href="https://huggingface.co/bigscience/T0_3B" rel="nofollow">T0</a> no caminho desejado:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> hf_hub_download <span class="hljs-meta">&gt;&gt;&gt; </span>hf_hub_download(repo_id=<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>, filename=<span class="hljs-string">&quot;config.json&quot;</span>, cache_dir=<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)<!-- HTML_TAG_END --></pre></div></li></ol></li></ul> <p>Depois que o arquivo for baixado e armazenado no cachê local, especifique seu caminho local para carregá-lo e usá-lo:</p> 
<div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0/config.json&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Para obter mais detalhes sobre como baixar arquivos armazenados no Hub, consulte a seção <a href="https://huggingface.co/docs/hub/how-to-downstream" rel="nofollow">How to download files from the Hub</a>.</p></div> <script 
type="module" data-hydrate="gyblmw"> import { start } from "/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="gyblmw"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/pt","assets":"/docs/transformers/pr_18789/pt"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/pt/_app/pages/installation.mdx-hf-doc-builder.js") ], params: {} } }); </script>
456
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_toctree.yml
- sections: - local: index title: 🤗 Transformers - local: quicktour title: Tour rápido - local: installation title: Instalação title: Início - sections: - local: pipeline_tutorial title: Pipelines para inferência - local: training title: Fine-tuning de um modelo pré-treinado - local: accelerate title: Treinamento distribuído com 🤗 Accelerate title: Tutoriais - sections: - local: fast_tokenizers title: Usando os Tokenizers do 🤗 Tokenizers - local: create_a_model title: Criando uma arquitetura customizada - sections: - local: tasks/sequence_classification title: Classificação de texto - local: tasks/token_classification title: Classificação de tokens title: Fine-tuning para tarefas específicas - local: multilingual title: Modelos multilinguísticos para inferência title: Guias práticos
457
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/multilingual.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;modelos-multilingusticos-para-inferncia&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;xlm&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;xlm-com-language-embeddings&quot;,&quot;title&quot;:&quot;XLM com language embeddings&quot;},{&quot;local&quot;:&quot;xlm-sem-language-embeddings&quot;,&quot;title&quot;:&quot;XLM sem language embeddings&quot;}],&quot;title&quot;:&quot;XLM&quot;},{&quot;local&quot;:&quot;bert&quot;,&quot;title&quot;:&quot;BERT&quot;},{&quot;local&quot;:&quot;xlmroberta&quot;,&quot;title&quot;:&quot;XLM-RoBERTa&quot;},{&quot;local&quot;:&quot;m2m100&quot;,&quot;title&quot;:&quot;M2M100&quot;},{&quot;local&quot;:&quot;mbart&quot;,&quot;title&quot;:&quot;MBart&quot;}],&quot;title&quot;:&quot;Modelos multilinguísticos para inferência&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/multilingual.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/DocNotebookDropdown-hf-doc-builder.js"> <h1 class="relative group"><a id="modelos-multilingusticos-para-inferncia" 
class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#modelos-multilingusticos-para-inferncia"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Modelos multilinguísticos para inferência </span></h1> <div class="flex space-x-1 absolute z-10 right-0 top-0"> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Colab" class="!m-0" src="https://colab.research.google.com/assets/colab-badge.svg"> </button> </div> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Studio Lab" class="!m-0" src="https://studiolab.sagemaker.aws/studiolab.svg"> </button> </div></div> <p>Existem vários modelos multilinguísticos no 🤗 Transformers e seus usos para inferência diferem dos modelos monolíngues. No entanto, nem <em>todos</em> os usos dos modelos multilíngues são tão diferentes. Alguns modelos, como o <a href="https://huggingface.co/bert-base-multilingual-uncased" rel="nofollow">bert-base-multilingual-uncased</a>, podem ser usados como se fossem monolíngues. 
Este guia irá te ajudar a usar modelos multilíngues cujo uso difere para o propósito de inferência.</p> <h2 class="relative group"><a id="xlm" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlm"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM </span></h2> <p>O XLM tem dez checkpoints diferentes dos quais apenas um é monolíngue. 
Os nove checkpoints restantes do modelo são subdivididos em duas categorias: checkpoints que usam de language embeddings e os que não.</p> <h3 class="relative group"><a id="xlm-com-language-embeddings" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlm-com-language-embeddings"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM com language embeddings </span></h3> <p>Os seguintes modelos de XLM usam language embeddings para especificar a linguagem utilizada para a inferência.</p> <ul><li><code>xlm-mlm-ende-1024</code> (Masked language modeling, English-German)</li> <li><code>xlm-mlm-enfr-1024</code> (Masked language modeling, English-French)</li> <li><code>xlm-mlm-enro-1024</code> (Masked language modeling, English-Romanian)</li> <li><code>xlm-mlm-xnli15-1024</code> (Masked language modeling, XNLI languages)</li> <li><code>xlm-mlm-tlm-xnli15-1024</code> (Masked language modeling + translation, XNLI languages)</li> <li><code>xlm-clm-enfr-1024</code> (Causal language modeling, English-French)</li> <li><code>xlm-clm-ende-1024</code> (Causal language modeling, English-German)</li></ul> <p>Os language embeddings são 
representados por um tensor de mesma dimensão que os <code>input_ids</code> passados ao modelo. Os valores destes tensores dependem do idioma utilizado e se identificam pelos atributos <code>lang2id</code> e <code>id2lang</code> do tokenizador.</p> <p>Neste exemplo, carregamos o checkpoint <code>xlm-clm-enfr-1024</code>(Causal language modeling, English-French):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMWithLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&quot;xlm-clm-enfr-1024&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = XLMWithLMHeadModel.from_pretrained(<span class="hljs-string">&quot;xlm-clm-enfr-1024&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>O atributo <code>lang2id</code> do tokenizador mostra os idiomas deste modelo e seus ids:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.lang2id) {<span class="hljs-string">&#x27;en&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;fr&#x27;</span>: <span class="hljs-number">1</span>}<!-- HTML_TAG_END --></pre></div> <p>Em seguida, cria-se um input de exemplo:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer 
focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor([tokenizer.encode(<span class="hljs-string">&quot;Wikipedia was used to&quot;</span>)]) <span class="hljs-comment"># batch size of 1</span><!-- HTML_TAG_END --></pre></div> <p>Estabelece-se o id do idioma, por exemplo <code>&quot;en&quot;</code>, e utiliza-se o mesmo para definir a language embedding. A language embedding é um tensor preenchido com <code>0</code>, que é o id de idioma para o inglês. 
Este tensor deve ser do mesmo tamanho que os <code>input_ids</code>.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>language_id = tokenizer.lang2id[<span class="hljs-string">&quot;en&quot;</span>] <span class="hljs-comment"># 0</span> <span class="hljs-meta">&gt;&gt;&gt; </span>langs = torch.tensor([language_id] * input_ids.shape[<span class="hljs-number">1</span>]) <span class="hljs-comment"># torch.tensor([0, 0, 0, ..., 0])</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># We reshape it to be of size (batch_size, sequence_length)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>langs = langs.view(<span class="hljs-number">1</span>, -<span class="hljs-number">1</span>) <span class="hljs-comment"># is now of shape [1, sequence_length] (we have a batch size of 
1)</span><!-- HTML_TAG_END --></pre></div> <p>Agora você pode passar os <code>input_ids</code> e a language embedding ao modelo:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, langs=langs)<!-- HTML_TAG_END --></pre></div> <p>O script <a href="https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-generation/run_generation.py" rel="nofollow">run_generation.py</a> pode gerar um texto com language embeddings utilizando os checkpoints <code>xlm-clm</code>.</p> <h3 class="relative group"><a id="xlm-sem-language-embeddings" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlm-sem-language-embeddings"><span><svg class="" 
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM sem language embeddings </span></h3> <p>Os seguintes modelos XLM não requerem o uso de language embeddings durante a inferência:</p> <ul><li><code>xlm-mlm-17-1280</code> (Modelagem de linguagem com máscara, 17 idiomas)</li> <li><code>xlm-mlm-100-1280</code> (Modelagem de linguagem com máscara, 100 idiomas)</li></ul> <p>Estes modelos são utilizados para representações genéricas de frase diferentemente dos checkpoints XLM anteriores.</p> <h2 class="relative group"><a id="bert" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#bert"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 
8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>BERT </span></h2> <p>Os seguintes modelos do BERT podem ser utilizados para tarefas multilinguísticas:</p> <ul><li><code>bert-base-multilingual-uncased</code> (Modelagem de linguagem com máscara + Previsão de frases, 102 idiomas)</li> <li><code>bert-base-multilingual-cased</code> (Modelagem de linguagem com máscara + Previsão de frases, 104 idiomas)</li></ul> <p>Estes modelos não requerem language embeddings durante a inferência. Devem identificar a linguagem a partir do contexto e realizar a inferência em sequência.</p> <h2 class="relative group"><a id="xlmroberta" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#xlmroberta"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>XLM-RoBERTa </span></h2> <p>Os seguintes modelos do XLM-RoBERTa podem ser utilizados para tarefas multilinguísticas:</p> <ul><li><code>xlm-roberta-base</code> (Modelagem de linguagem com máscara, 100 idiomas)</li> <li><code>xlm-roberta-large</code> Modelagem de 
linguagem com máscara, 100 idiomas)</li></ul> <p>O XLM-RoBERTa foi treinado com 2,5 TB de dados do CommonCrawl recém-criados e testados em 100 idiomas. Proporciona fortes vantagens sobre os modelos multilinguísticos publicados anteriormente como o mBERT e o XLM em tarefas subsequentes como a classificação, a rotulagem de sequências e à respostas a perguntas.</p> <h2 class="relative group"><a id="m2m100" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#m2m100"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>M2M100 </span></h2> <p>Os seguintes modelos de M2M100 podem ser utilizados para traduções multilinguísticas:</p> <ul><li><code>facebook/m2m100_418M</code> (Tradução)</li> <li><code>facebook/m2m100_1.2B</code> (Tradução)</li></ul> <p>Neste exemplo, o checkpoint <code>facebook/m2m100_418M</code> é carregado para traduzir do mandarim ao inglês. 
É possível estabelecer o idioma de origem no tokenizador:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> M2M100ForConditionalGeneration, M2M100Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>en_text = <span class="hljs-string">&quot;Do not meddle in the affairs of wizards, for they are subtle and quick to anger.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>chinese_text = <span class="hljs-string">&quot;不要插手巫師的事務, 因為他們是微妙的, 很快就會發怒.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = M2M100Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/m2m100_418M&quot;</span>, src_lang=<span class="hljs-string">&quot;zh&quot;</span>) <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = M2M100ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/m2m100_418M&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Tokenização do texto:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>encoded_zh = tokenizer(chinese_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>O M2M100 força o id do idioma de destino como o primeiro token gerado para traduzir ao idioma de destino. 
É definido o <code>forced_bos_token_id</code> como <code>en</code> no método <code>generate</code> para traduzir ao inglês.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id(<span class="hljs-string">&quot;en&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&#x27;Do not interfere with the matters of the witches, because they are delicate and will soon be angry.&#x27;</span><!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="mbart" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#mbart"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>MBart </span></h2> <p>Os seguintes modelos do MBart podem ser utilizados para tradução multilinguística:</p> <ul><li><code>facebook/mbart-large-50-one-to-many-mmt</code> (Tradução automática multilinguística de um a vários, 50 idiomas)</li> <li><code>facebook/mbart-large-50-many-to-many-mmt</code> (Tradução automática multilinguística de vários a vários, 50 idiomas)</li> <li><code>facebook/mbart-large-50-many-to-one-mmt</code> (Tradução automática multilinguística vários a um, 50 idiomas)</li> <li><code>facebook/mbart-large-50</code> (Tradução multilinguística, 50 idiomas)</li> <li><code>facebook/mbart-large-cc25</code></li></ul> <p>Neste exemplo, carrega-se o checkpoint <code>facebook/mbart-large-50-many-to-many-mmt</code> para traduzir do finlandês ao inglês. 
Pode-se definir o idioma de origem no tokenizador:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>en_text = <span class="hljs-string">&quot;Do not meddle in the affairs of wizards, for they are subtle and quick to anger.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>fi_text = <span class="hljs-string">&quot;Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>, src_lang=<span 
class="hljs-string">&quot;fi_FI&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Tokenizando o texto:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>encoded_en = tokenizer(en_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>O MBart força o id do idioma de destino como o primeiro token gerado para traduzir ao idioma de destino. 
É definido o <code>forced_bos_token_id</code> como <code>en</code> no método <code>generate</code> para traduzir ao inglês.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id(<span class="hljs-string">&quot;en_XX&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&quot;Don&#x27;t interfere with the wizard&#x27;s affairs, because they are subtle, will soon get angry.&quot;</span><!-- HTML_TAG_END --></pre></div> <p>Se estiver usando o checkpoint <code>facebook/mbart-large-50-many-to-one-mmt</code> não será necessário forçar o id do idioma de destino como sendo o primeiro 
token generado, caso contrário a usagem é a mesma.</p> <script type="module" data-hydrate="v20liu"> import { start } from "/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="v20liu"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/pt","assets":"/docs/transformers/pr_18789/pt"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/pt/_app/pages/multilingual.mdx-hf-doc-builder.js") ], params: {} } }); </script>
458
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/pipeline_tutorial.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;pipelines-para-inferncia&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;uso-do-pipeline&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;selecionando-um-modelo-e-um-tokenizador&quot;,&quot;title&quot;:&quot;Selecionando um modelo e um tokenizador&quot;}],&quot;title&quot;:&quot;Uso do pipeline&quot;},{&quot;local&quot;:&quot;pipeline-de-audio&quot;,&quot;title&quot;:&quot;Pipeline de audio&quot;},{&quot;local&quot;:&quot;pipeline-de-viso-computacional&quot;,&quot;title&quot;:&quot;Pipeline de visão computacional&quot;}],&quot;title&quot;:&quot;Pipelines para inferência&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/pipeline_tutorial.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/CodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="pipelines-para-inferncia" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipelines-para-inferncia"><span><svg 
class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipelines para inferência </span></h1> <p>Um [pipeline] simplifica o uso dos modelos no <a href="https://huggingface.co/models" rel="nofollow">Model Hub</a> para a inferência de uma diversidade de tarefas, como a geração de texto, a segmentação de imagens e a classificação de áudio. Inclusive, se não tem experiência com alguma modalidade específica ou não compreende o código que forma os modelos, pode usar eles mesmo assim com o [pipeline]! 
Este tutorial te ensinará a:</p> <ul><li>Utilizar um <code>pipeline()</code> para inferência.</li> <li>Utilizar um tokenizador ou model específico.</li> <li>Utilizar um <code>pipeline()</code> para tarefas de áudio e visão computacional.</li></ul> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Acesse a documentação do <code>pipeline()</code> para obter uma lista completa de tarefas possíveis.</p></div> <h2 class="relative group"><a id="uso-do-pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#uso-do-pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Uso do pipeline </span></h2> <p>Mesmo que cada tarefa tenha um <code>pipeline()</code> associado, é mais simples usar a abstração geral do <code>pipeline()</code> que contém todos os pipelines das tarefas mais específicas. 
O <code>pipeline()</code> carrega automaticamenta um modelo predeterminado e um tokenizador com capacidade de inferência para sua tarefa.</p> <ol><li>Comece carregando um <code>pipeline()</code> e especifique uma tarefa de inferência:</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>generator = pipeline(task=<span class="hljs-string">&quot;text-generation&quot;</span>)<!-- HTML_TAG_END --></pre></div> <ol start="2"><li>Passe seu dado de entrada, no caso um texto, ao <code>pipeline()</code>:</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generator(<span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>) [{<span class="hljs-string">&#x27;generated_text&#x27;</span>: <span class="hljs-string">&#x27;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Iron-priests at the door to the east, and thirteen for the Lord Kings at the end of the mountain&#x27;</span>}]<!-- HTML_TAG_END --></pre></div> <p>Se tiver mais de uma entrada, passe-a como uma lista:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generator( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Nine for Mortal Men, doomed to die, One for the Dark Lord on his dark throne&quot;</span>, <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Qualquer parâmetro adicional para a sua tarefa também pode ser incluído no <code>pipeline()</code>. A tarefa <code>text-generation</code> tem um método <code>generate()</code> com vários parâmetros para controlar a saída. 
Por exemplo, se quiser gerar mais de uma saída, defina-a no parâmetro <code>num_return_sequences</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generator( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>, <span class="hljs-meta">... </span> num_return_sequences=<span class="hljs-number">2</span>, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="selecionando-um-modelo-e-um-tokenizador" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#selecionando-um-modelo-e-um-tokenizador"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Selecionando um modelo e um tokenizador </span></h3> <p>O <code>pipeline()</code> aceita qualquer modelo do <a href="https://huggingface.co/models" rel="nofollow">Model Hub</a>. Há rótulos adicionais no Model Hub que te permitem filtrar pelo modelo que gostaria de usar para sua tarefa. Uma vez que tiver escolhido o modelo apropriado, carregue-o com as classes <code>AutoModelFor</code> e [`AutoTokenizer’] correspondentes. 
Por exemplo, carregue a classe <code>AutoModelForCausalLM</code> para uma tarefa de modelagem de linguagem causal:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Crie uma <code>pipeline()</code> para a sua tarefa e especifíque o modelo e o tokenizador que foram carregados:</p> <div class="code-block relative"><div class="absolute top-2.5 
right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>generator = pipeline(task=<span class="hljs-string">&quot;text-generation&quot;</span>, model=model, tokenizer=tokenizer)<!-- HTML_TAG_END --></pre></div> <p>Passe seu texto de entrada ao <code>pipeline()</code> para gerar algum texto:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generator(<span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>) [{<span class="hljs-string">&#x27;generated_text&#x27;</span>: <span class="hljs-string">&#x27;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Dragon-lords (for them to rule in a world ruled by their rulers, and all who live within the realm&#x27;</span>}]<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="pipeline-de-audio" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline-de-audio"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 
0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline de audio </span></h2> <p>A flexibilidade do <code>pipeline()</code> significa que também pode-se extender às tarefas de áudio. La flexibilidad de <code>pipeline()</code> significa que también se puede extender a tareas de audio.</p> <p>Por exemplo, classifiquemos a emoção de um breve fragmento do famoso discurso de John F. Kennedy /home/rzimmerdev/dev/transformers/docs/source/pt/pipeline_tutorial.mdx Encontre um modelo de <a href="https://huggingface.co/models?pipeline_tag=audio-classification" rel="nofollow">audio classification</a> para reconhecimento de emoções no Model Hub e carregue-o usando o <code>pipeline()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform 
-translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>audio_classifier = pipeline( <span class="hljs-meta">... </span> task=<span class="hljs-string">&quot;audio-classification&quot;</span>, model=<span class="hljs-string">&quot;ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition&quot;</span> <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Passe o arquivo de áudio ao <code>pipeline()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; 
</span>audio_classifier(<span class="hljs-string">&quot;jfk_moon_speech.wav&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;calm&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.13856211304664612</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;disgust&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.13148026168346405</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;happy&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.12635163962841034</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;angry&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.12439591437578201</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;fearful&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.12404385954141617</span>}]<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="pipeline-de-viso-computacional" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline-de-viso-computacional"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 
43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline de visão computacional </span></h2> <p>Finalmente, utilizar um <code>pipeline()</code> para tarefas de visão é praticamente a mesma coisa. Especifique a sua tarefa de visão e passe a sua imagem ao classificador. A imagem pode ser um link ou uma rota local à imagem. Por exemplo, que espécie de gato está presente na imagem?</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" alt="pipeline-cat-chonk"></p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>vision_classifier = pipeline(task=<span class="hljs-string">&quot;image-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>vision_classifier( <span class="hljs-meta">... </span> images=<span class="hljs-string">&quot;https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg&quot;</span> <span class="hljs-meta">... </span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;lynx, catamount&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.4403027892112732</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;cougar, puma, catamount, mountain lion, painter, panther, Felis concolor&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.03433405980467796</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;snow leopard, ounce, Panthera uncia&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.032148055732250214</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;Egyptian cat&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.02353910356760025</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;tiger cat&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.023034192621707916</span>}]<!-- HTML_TAG_END --></pre></div> <script type="module" data-hydrate="hc9ssc"> import { start } from "/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="hc9ssc"]').parentNode, paths: 
{"base":"/docs/transformers/pr_18789/pt","assets":"/docs/transformers/pr_18789/pt"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/pt/_app/pages/pipeline_tutorial.mdx-hf-doc-builder.js") ], params: {} } }); </script>
459
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/fast_tokenizers.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;usando-os-tokenizers-do-tokenizers&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;carregando-diretamente-de-um-objeto-tokenizer&quot;,&quot;title&quot;:&quot;Carregando diretamente de um objeto tokenizer&quot;},{&quot;local&quot;:&quot;carregando-de-um-arquivo-json&quot;,&quot;title&quot;:&quot;Carregando de um arquivo JSON&quot;}],&quot;title&quot;:&quot;Usando os Tokenizers do 🤗 Tokenizers&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/fast_tokenizers.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/CodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="usando-os-tokenizers-do-tokenizers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#usando-os-tokenizers-do-tokenizers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 
0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Usando os Tokenizers do 🤗 Tokenizers </span></h1> <p>O <code>PreTrainedTokenizerFast</code> depende da biblioteca <a href="https://huggingface.co/docs/tokenizers" rel="nofollow">🤗 Tokenizers</a>. O Tokenizer obtido da biblioteca 🤗 Tokenizers pode ser carregado facilmente pelo 🤗 Transformers.</p> <p>Antes de entrar nos detalhes, vamos começar criando um tokenizer fictício em algumas linhas:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; 
border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers <span class="hljs-keyword">import</span> Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.models <span class="hljs-keyword">import</span> BPE <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.trainers <span class="hljs-keyword">import</span> BpeTrainer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.pre_tokenizers <span class="hljs-keyword">import</span> Whitespace <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = Tokenizer(BPE(unk_token=<span class="hljs-string">&quot;[UNK]&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = BpeTrainer(special_tokens=[<span class="hljs-string">&quot;[UNK]&quot;</span>, <span class="hljs-string">&quot;[CLS]&quot;</span>, <span class="hljs-string">&quot;[SEP]&quot;</span>, <span class="hljs-string">&quot;[PAD]&quot;</span>, <span class="hljs-string">&quot;[MASK]&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.pre_tokenizer = Whitespace() <span class="hljs-meta">&gt;&gt;&gt; </span>files = [...] <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.train(files, trainer)<!-- HTML_TAG_END --></pre></div> <p>Agora temos um tokenizer treinado nos arquivos que foram definidos. 
Nós podemos continuar usando nessa execução ou salvar em um arquivo JSON para re-utilizar no futuro.</p> <h2 class="relative group"><a id="carregando-diretamente-de-um-objeto-tokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#carregando-diretamente-de-um-objeto-tokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Carregando diretamente de um objeto tokenizer </span></h2> <p>Vamos ver como aproveitar esse objeto tokenizer na biblioteca 🤗 Transformers. 
A classe <code>PreTrainedTokenizerFast</code> permite uma instanciação fácil, aceitando o objeto <em>tokenizer</em> instanciado como um argumento:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PreTrainedTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)<!-- HTML_TAG_END --></pre></div> <p>Esse objeto pode ser utilizado com todos os métodos compartilhados pelos tokenizers dos 🤗 Transformers! 
Vá para <a href="main_classes/tokenizer">a página do tokenizer</a> para mais informações.</p> <h2 class="relative group"><a id="carregando-de-um-arquivo-json" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#carregando-de-um-arquivo-json"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Carregando de um arquivo JSON </span></h2> <p>Para carregar um tokenizer de um arquivo JSON vamos primeiro começar salvando nosso tokenizer:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" 
width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save(<span class="hljs-string">&quot;tokenizer.json&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>A pasta para qual salvamos esse arquivo pode ser passada para o método de inicialização do <code>PreTrainedTokenizerFast</code> usando o <code>tokenizer_file</code> parâmetro:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> 
<pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PreTrainedTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file=<span class="hljs-string">&quot;tokenizer.json&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Esse objeto pode ser utilizado com todos os métodos compartilhados pelos tokenizers dos 🤗 Transformers! Vá para <a href="main_classes/tokenizer">a página do tokenizer</a> para mais informações.</p> <script type="module" data-hydrate="epv1kh"> import { start } from "/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="epv1kh"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/pt","assets":"/docs/transformers/pr_18789/pt"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/pt/_app/pages/fast_tokenizers.mdx-hf-doc-builder.js") ], params: {} } }); </script>
460
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/quicktour.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;tour-rpido&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;pipeline&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;uso-da-pipeline&quot;,&quot;title&quot;:&quot;Uso da pipeline&quot;},{&quot;local&quot;:&quot;use-outro-modelo-e-tokenizer-na-pipeline&quot;,&quot;title&quot;:&quot;Use outro modelo e tokenizer na pipeline&quot;}],&quot;title&quot;:&quot;Pipeline&quot;},{&quot;local&quot;:&quot;autoclass&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;autotokenizer&quot;,&quot;title&quot;:&quot;AutoTokenizer&quot;},{&quot;local&quot;:&quot;automodel&quot;,&quot;title&quot;:&quot;AutoModel&quot;},{&quot;local&quot;:&quot;salvar-um-modelo&quot;,&quot;title&quot;:&quot;Salvar um modelo&quot;}],&quot;title&quot;:&quot;AutoClass&quot;}],&quot;title&quot;:&quot;Tour rápido&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/quicktour.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" 
href="/docs/transformers/pr_18789/pt/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/DocNotebookDropdown-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="tour-rpido" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#tour-rpido"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Tour rápido </span></h1> <div class="flex space-x-1 absolute z-10 right-0 top-0"> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Colab" class="!m-0" src="https://colab.research.google.com/assets/colab-badge.svg"> </button> </div> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Studio Lab" class="!m-0" src="https://studiolab.sagemaker.aws/studiolab.svg"> </button> </div></div> <p>Comece a trabalhar com 🤗 Transformers! 
Comece usando <code>pipeline()</code> para rápida inferência e facilmente carregue um modelo pré-treinado e um tokenizer com <a href="./model_doc/auto">AutoClass</a> para resolver tarefas de texto, visão ou áudio.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Todos os exemplos de código apresentados na documentação têm um botão no canto superior direito para escolher se você deseja ocultar ou mostrar o código no Pytorch ou no TensorFlow. Caso contrário, é esperado que funcione para ambos back-ends sem nenhuma alteração.</p></div> <h2 class="relative group"><a id="pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline </span></h2> <p><code>pipeline()</code> é a maneira mais fácil de usar um modelo pré-treinado para uma dada tarefa.</p> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/tiZFewofSLM" title="YouTube video player" 
frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>A <code>pipeline()</code> apoia diversas tarefas fora da caixa:</p> <p><strong>Texto</strong>:</p> <ul><li>Análise sentimental: classifica a polaridade de um texto.</li> <li>Geração de texto (em Inglês): gera texto a partir de uma entrada.</li> <li>Reconhecimento de entidade mencionada: legenda cada palavra com uma classe que a representa (pessoa, data, local, etc…) </li> <li>Respostas: extrai uma resposta dado algum contexto e uma questão</li> <li>Máscara de preenchimento: preenche o espaço, dado um texto com máscaras de palavras.</li> <li>Sumarização: gera o resumo de um texto longo ou documento.</li> <li>Tradução: traduz texto para outra língua.</li> <li>Extração de características: cria um tensor que representa o texto.</li></ul> <p><strong>Imagem</strong>:</p> <ul><li>Classificação de imagens: classifica uma imagem.</li> <li>Segmentação de imagem: classifica cada pixel da imagem.</li> <li>Detecção de objetos: detecta objetos em uma imagem.</li></ul> <p><strong>Audio</strong>:</p> <ul><li>Classficação de áudio: legenda um trecho de áudio fornecido.</li> <li>Reconhecimento de fala automático: transcreve audio em texto.</li></ul> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Para mais detalhes sobre a <code>pipeline()</code> e tarefas associadas, siga a documentação <a href="./main_classes/pipelines">aqui</a>.</p></div> <h3 class="relative group"><a id="uso-da-pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#uso-da-pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Uso da pipeline </span></h3> <p>No exemplo a seguir, você usará <code>pipeline()</code> para análise sentimental.</p> <p>Instale as seguintes dependências se você ainda não o fez:</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" 
xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none 
transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install torch<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 
7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 
translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install tensorflow<!-- HTML_TAG_END --></pre></div> </div></div> </div> <p>Importe <code>pipeline()</code> e especifique a tarefa que deseja completar:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>classifier = pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>A pipeline baixa and armazena um <a 
href="https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english" rel="nofollow">modelo pré-treinado</a> padrão e tokenizer para análise sentimental. Agora você pode usar <code>classifier</code> no texto alvo: </p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>classifier(<span class="hljs-string">&quot;We are very happy to show you the 🤗 Transformers library.&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998</span>}]<!-- HTML_TAG_END --></pre></div> <p>Para mais de uma sentença, passe uma lista para a <code>pipeline()</code>, a qual retornará uma lista de dicionários:</p> <div class="code-block relative"><div class="absolute 
top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>results = classifier([<span class="hljs-string">&quot;We are very happy to show you the 🤗 Transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> result <span class="hljs-keyword">in</span> results: <span class="hljs-meta">... 
</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;label: <span class="hljs-subst">{result[<span class="hljs-string">&#x27;label&#x27;</span>]}</span>, with score: <span class="hljs-subst">{<span class="hljs-built_in">round</span>(result[<span class="hljs-string">&#x27;score&#x27;</span>], <span class="hljs-number">4</span>)}</span>&quot;</span>) label: POSITIVE, <span class="hljs-keyword">with</span> score: <span class="hljs-number">0.9998</span> label: NEGATIVE, <span class="hljs-keyword">with</span> score: <span class="hljs-number">0.5309</span><!-- HTML_TAG_END --></pre></div> <p>A <code>pipeline()</code> também pode iterar sobre um Dataset inteiro. Comece instalando a biblioteca de <a href="https://huggingface.co/docs/datasets/" rel="nofollow">🤗 Datasets</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- 
HTML_TAG_START -->pip install datasets <!-- HTML_TAG_END --></pre></div> <p>Crie uma <code>pipeline()</code> com a tarefa que deseja resolver e o modelo que deseja usar.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>speech_recognizer = pipeline(<span class="hljs-string">&quot;automatic-speech-recognition&quot;</span>, model=<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>A seguir, carregue uma base de dados (confira a 🤗 <a href="https://huggingface.co/docs/datasets/quickstart.html" 
rel="nofollow">Iniciação em Datasets</a> para mais detalhes) que você gostaria de iterar sobre. Por exemplo, vamos carregar o dataset <a href="https://huggingface.co/datasets/PolyAI/minds14" rel="nofollow">MInDS-14</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset, Audio <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;PolyAI/minds14&quot;</span>, name=<span class="hljs-string">&quot;en-US&quot;</span>, split=<span class="hljs-string">&quot;train&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Precisamos garantir que a taxa de amostragem do conjunto de dados corresponda à taxa de amostragem em que o facebook/wav2vec2-base-960h foi 
treinado.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.cast_column(<span class="hljs-string">&quot;audio&quot;</span>, Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))<!-- HTML_TAG_END --></pre></div> <p>Os arquivos de áudio são carregados e re-amostrados automaticamente ao chamar a coluna <code>&quot;audio&quot;</code>. 
Vamos extrair as arrays de formas de onda originais das primeiras 4 amostras e passá-las como uma lista para o pipeline:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>result = speech_recognizer(dataset[:<span class="hljs-number">4</span>][<span class="hljs-string">&quot;audio&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>([d[<span class="hljs-string">&quot;text&quot;</span>] <span class="hljs-keyword">for</span> d <span class="hljs-keyword">in</span> result]) [<span class="hljs-string">&#x27;I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT&#x27;</span>, <span class="hljs-string">&quot;FONDERING HOW I&#x27;D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE&quot;</span>, <span 
class="hljs-string">&quot;I I&#x27;D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I&#x27;M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I&#x27;M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS&quot;</span>, <span class="hljs-string">&#x27;HOW DO I TURN A JOIN A COUNT&#x27;</span>]<!-- HTML_TAG_END --></pre></div> <p>Para um conjunto de dados maior onde as entradas são maiores (como em fala ou visão), será necessário passar um gerador em vez de uma lista que carregue todas as entradas na memória. Consulte a <a href="./main_classes/pipelines">documentação do pipeline</a> para mais informações.</p> <h3 class="relative group"><a id="use-outro-modelo-e-tokenizer-na-pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#use-outro-modelo-e-tokenizer-na-pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Use outro modelo e tokenizer na pipeline </span></h3> <p>A <code>pipeline()</code> pode acomodar qualquer modelo do <a 
href="https://huggingface.co/models" rel="nofollow">Model Hub</a>, facilitando sua adaptação para outros casos de uso. Por exemplo, se você quiser um modelo capaz de lidar com texto em francês, use as tags no Model Hub para filtrar um modelo apropriado. O principal resultado filtrado retorna um <a href="https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment" rel="nofollow">modelo BERT</a> bilíngue ajustado para análise de sentimentos. Ótimo, vamos usar este modelo!</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span><!-- HTML_TAG_END --></pre></div> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex 
h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 
4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Use o <code>AutoModelForSequenceClassification</code> e <code>AutoTokenizer</code> para carregar o modelo pré-treinado e seu tokenizer associado (mais em <code>AutoClass</code> abaixo):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_name)<!-- HTML_TAG_END 
--></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 
5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Use o <code>TFAutoModelForSequenceClassification</code> and <code>AutoTokenizer</code> para carregar o modelo pré-treinado e o tokenizer associado (mais em <code>TFAutoClass</code> abaixo):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- 
HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_name)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <p>Então você pode especificar o modelo e o tokenizador na <code>pipeline()</code> e aplicar o <code>classifier</code> no seu texto alvo: </p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>classifier = pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>, model=model, tokenizer=tokenizer) <span 
class="hljs-meta">&gt;&gt;&gt; </span>classifier(<span class="hljs-string">&quot;Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;5 stars&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.7273</span>}]<!-- HTML_TAG_END --></pre></div> <p>Se você não conseguir achar um modelo para o seu caso de uso, precisará usar fine-tune em um modelo pré-treinado nos seus dados. Veja nosso <a href="./training">tutorial de fine-tuning</a> para descobrir como. Finalmente, depois que você tiver usado esse processo em seu modelo, considere compartilhá-lo conosco (veja o tutorial <a href="./model_sharing">aqui</a>) na plataforma Model Hub afim de democratizar NLP! 🤗</p> <h2 class="relative group"><a id="autoclass" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#autoclass"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutoClass </span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/AhChOFRegn4" title="YouTube video player" frameborder="0" 
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Por baixo dos panos, as classes <code>AutoModelForSequenceClassification</code> e <code>AutoTokenizer</code> trabalham juntas para fortificar o <code>pipeline()</code>. Um <a href="./model_doc/auto">AutoClass</a> é um atalho que automaticamente recupera a arquitetura de um modelo pré-treinado a partir de seu nome ou caminho. Basta selecionar a <code>AutoClass</code> apropriada para sua tarefa e seu tokenizer associado com <code>AutoTokenizer</code>. </p> <p>Vamos voltar ao nosso exemplo e ver como você pode usar a <code>AutoClass</code> para replicar os resultados do <code>pipeline()</code>.</p> <h3 class="relative group"><a id="autotokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#autotokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutoTokenizer </span></h3> <p>Um tokenizer é responsável por pré-processar o texto em um formato que seja compreensível para o modelo. Primeiro, o tokenizer dividirá o texto em palavras chamadas <em>tokens</em>. 
Existem várias regras que regem o processo de tokenização, incluindo como dividir uma palavra e em que nível (saiba mais sobre tokenização <a href="./tokenizer_summary">aqui</a>). A coisa mais importante a lembrar, porém, é que você precisa instanciar o tokenizer com o mesmo nome do modelo para garantir que está usando as mesmas regras de tokenização com as quais um modelo foi pré-treinado.</p> <p>Carregue um tokenizer com <code>AutoTokenizer</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; 
</span>tokenizer = AutoTokenizer.from_pretrained(model_name)<!-- HTML_TAG_END --></pre></div> <p>Em seguida, o tokenizer converte os tokens em números para construir um tensor como entrada para o modelo. Isso é conhecido como o <em>vocabulário</em> do modelo.</p> <p>Passe o texto para o tokenizer:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&quot;We are very happy to show you the 🤗 Transformers library.&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoding) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: [<span class="hljs-number">101</span>, <span class="hljs-number">11312</span>, <span class="hljs-number">10320</span>, <span class="hljs-number">12495</span>, <span 
class="hljs-number">19308</span>, <span class="hljs-number">10114</span>, <span class="hljs-number">11391</span>, <span class="hljs-number">10855</span>, <span class="hljs-number">10103</span>, <span class="hljs-number">100</span>, <span class="hljs-number">58263</span>, <span class="hljs-number">13299</span>, <span class="hljs-number">119</span>, <span class="hljs-number">102</span>], <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;attention_mask&#x27;</span>: [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]}<!-- HTML_TAG_END --></pre></div> <p>O tokenizer retornará um dicionário contendo:</p> <ul><li><a href="./glossary#input-ids">input_ids</a>: representações numéricas de seus tokens.</li> <li><a href=".glossary#attention-mask">atttention_mask</a>: indica quais tokens devem ser atendidos.</li></ul> <p>Assim como o <code>pipeline()</code>, o tokenizer aceitará uma lista de entradas. 
Além disso, o tokenizer também pode preencher e truncar o texto para retornar um lote com comprimento uniforme:</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 
8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_batch = tokenizer( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;We are very happy to show you the 🤗 transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>], <span class="hljs-meta">... 
</span> padding=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> max_length=<span class="hljs-number">512</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 
6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform 
-translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_batch = tokenizer( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;We are very happy to show you the 🤗 Transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>], <span class="hljs-meta">... </span> padding=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> max_length=<span class="hljs-number">512</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <p>Leia o tutorial de <a href="./pr%C3%A9-processamento">pré-processamento</a> para obter mais detalhes sobre tokenização.</p> <h3 class="relative group"><a id="automodel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#automodel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 
0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutoModel </span></h3> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 
4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>🤗 Transformers fornecem uma maneira simples e unificada de carregar instâncias pré-treinadas. Isso significa que você pode carregar um <code>AutoModel</code> como carregaria um <code>AutoTokenizer</code>. A única diferença é selecionar o <code>AutoModel</code> correto para a tarefa. Como você está fazendo classificação de texto ou sequência, carregue <code>AutoModelForSequenceClassification</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START 
--><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Veja o <a href="./task_summary">sumário de tarefas</a> para qual classe de <code>AutoModel</code> usar para cada tarefa.</p></div> <p>Agora você pode passar seu grupo de entradas pré-processadas diretamente para o modelo. Você apenas tem que descompactar o dicionário usando <code>**</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 
transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_outputs = pt_model(**pt_batch)<!-- HTML_TAG_END --></pre></div> <p>O modelo gera as ativações finais no atributo <code>logits</code>. Aplique a função softmax aos <code>logits</code> para recuperar as probabilidades:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torch <span class="hljs-keyword">import</span> nn <span class="hljs-meta">&gt;&gt;&gt; </span>pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-built_in">print</span>(pt_predictions) tensor([[<span class="hljs-number">0.0021</span>, <span class="hljs-number">0.0018</span>, <span class="hljs-number">0.0115</span>, <span class="hljs-number">0.2121</span>, <span class="hljs-number">0.7725</span>], [<span class="hljs-number">0.2084</span>, <span class="hljs-number">0.1826</span>, <span class="hljs-number">0.1969</span>, <span class="hljs-number">0.1755</span>, <span class="hljs-number">0.2365</span>]], grad_fn=&lt;SoftmaxBackward0&gt;)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 
1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>🤗 Transformers fornecem uma maneira simples e unificada de carregar instâncias pré-treinadas. Isso significa que você pode carregar um <code>TFAutoModel</code> como carregaria um <code>AutoTokenizer</code>. A única diferença é selecionar o <code>TFAutoModel</code> correto para a tarefa. 
Como você está fazendo classificação de texto ou sequência, carregue <code>TFAutoModelForSequenceClassification</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 
text-green-700 dark:text-gray-400"><p>Veja o <a href="./task_summary">sumário de tarefas</a> para qual classe de <code>AutoModel</code> usar para cada tarefa.</p></div> <p>Agora você pode passar seu grupo de entradas pré-processadas diretamente para o modelo através da passagem de chaves de dicionários ao tensor.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_outputs = tf_model(tf_batch)<!-- HTML_TAG_END --></pre></div> <p>O modelo gera as ativações finais no atributo <code>logits</code>. 
Aplique a função softmax aos <code>logits</code> para recuperar as probabilidades:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_predictions<!-- HTML_TAG_END --></pre></div> </div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Todos os modelos de 🤗 Transformers (PyTorch ou TensorFlow) geram tensores <em>antes</em> da 
função de ativação final (como softmax) pois essa função algumas vezes é fundida com a perda.</p></div> <p>Os modelos são um standard <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow"><code>torch.nn.Module</code></a> ou um [<code>tf.keras.Model</code>](https: //<a href="http://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow">www.tensorflow.org/api_docs/python/tf/keras/Model</a>) para que você possa usá-los em seu loop de treinamento habitual. No entanto, para facilitar as coisas, 🤗 Transformers fornece uma classe <code>Trainer</code> para PyTorch que adiciona funcionalidade para treinamento distribuído, precisão mista e muito mais. Para o TensorFlow, você pode usar o método <code>fit</code> de <a href="https://keras.io/" rel="nofollow">Keras</a>. Consulte o <a href="./training">tutorial de treinamento</a> para obter mais detalhes.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>As saídas do modelo 🤗 Transformers são classes de dados especiais para que seus atributos sejam preenchidos automaticamente em um IDE. 
As saídas do modelo também se comportam como uma tupla ou um dicionário (por exemplo, você pode indexar com um inteiro, uma parte ou uma string), caso em que os atributos <code>None</code> são ignorados.</p></div> <h3 class="relative group"><a id="salvar-um-modelo" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#salvar-um-modelo"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Salvar um modelo </span></h3> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path 
d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Uma vez que seu modelo estiver afinado, você pode salvá-lo com seu Tokenizer usando <code>PreTrainedModel.save_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer 
focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_save_directory = <span class="hljs-string">&quot;./pt_save_pretrained&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(pt_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.save_pretrained(pt_save_directory)<!-- HTML_TAG_END --></pre></div> <p>Quando você estiver pronto para usá-lo novamente, recarregue com <code>PreTrainedModel.from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;./pt_save_pretrained&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 
21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Uma vez que seu modelo estiver afinado, você pode salvá-lo com seu Tokenizer usando <code>TFPreTrainedModel.save_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_save_directory = <span class="hljs-string">&quot;./tf_save_pretrained&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(tf_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model.save_pretrained(tf_save_directory)<!-- HTML_TAG_END --></pre></div> <p>Quando você estiver pronto para usá-lo novamente, recarregue com <code>TFPreTrainedModel.from_pretrained()</code></p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path 
d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;./tf_save_pretrained&quot;</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <p>Um recurso particularmente interessante dos 🤗 Transformers é a capacidade de salvar um modelo e recarregá-lo como um modelo PyTorch ou TensorFlow. Use <code>from_pt</code> ou <code>from_tf</code> para converter o modelo de um framework para outro:</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm 
px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path 
d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 
0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" 
fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <script type="module" data-hydrate="40ls6p"> import { start } from "/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="40ls6p"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/pt","assets":"/docs/transformers/pr_18789/pt"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/pt/_app/pages/quicktour.mdx-hf-doc-builder.js") ], params: {} } }); </script>
461
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/index.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;transformers&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;se-voc-estiver-procurando-suporte-do-time-da-hugging-face-acesse&quot;,&quot;title&quot;:&quot;Se você estiver procurando suporte do time da Hugging Face, acesse&quot;},{&quot;local&quot;:&quot;contedo&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;modelos-atuais&quot;,&quot;title&quot;:&quot;Modelos atuais&quot;},{&quot;local&quot;:&quot;frameworks-aceitos&quot;,&quot;title&quot;:&quot;Frameworks aceitos&quot;}],&quot;title&quot;:&quot;Conteúdo&quot;}],&quot;title&quot;:&quot;🤗 Transformers&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/index.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js"> <h1 class="relative group"><a id="transformers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 
67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>🤗 Transformers </span></h1> <p>Estado da Arte para Aprendizado de Máquina em PyTorch, TensorFlow e JAX. O 🤗 Transformers disponibiliza APIs para facilmente baixar e treinar modelos pré-treinados de última geração. O uso de modelos pré-treinados pode diminuir os seus custos de computação, a sua pegada de carbono, além de economizar o tempo necessário para se treinar um modelo do zero. Os modelos podem ser usados para diversas tarefas:</p> <ul><li>📝 Textos: classificação, extração de informações, perguntas e respostas, resumir, traduzir e gerar textos em mais de 100 idiomas.</li> <li>🖼 Imagens: classificação, deteção de objetos, e segmentação.</li> <li>🗣 Audio: reconhecimento de fala e classificação de áudio.</li> <li>🐙 Multimodal: perguntas tabeladas e respsostas, reconhecimento ótico de charactéres, extração de informação de documentos escaneados, classificação de vídeo, perguntas e respostas visuais.</li></ul> <p>Nossa biblioteca aceita integração contínua entre três das bibliotecas mais populares de aprendizado profundo: Our library supports seamless integration between three of the most popular deep learning libraries: <a href="https://pytorch.org/" rel="nofollow">PyTorch</a>, <a href="https://www.tensorflow.org/" rel="nofollow">TensorFlow</a> e <a href="https://jax.readthedocs.io/en/latest/" rel="nofollow">JAX</a>. 
Treine seu modelo em três linhas de código em um framework, e carregue-o para execução em outro.</p> <p>Cada arquitetura 🤗 Transformers é definida em um módulo individual do Python, para que seja facilmente customizável para pesquisa e experimentos.</p> <h2 class="relative group"><a id="se-voc-estiver-procurando-suporte-do-time-da-hugging-face-acesse" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#se-voc-estiver-procurando-suporte-do-time-da-hugging-face-acesse"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Se você estiver procurando suporte do time da Hugging Face, acesse </span></h2> <a target="_blank" href="https://huggingface.co/support"><img alt="HuggingFace Expert Acceleration Program" src="https://huggingface.co/front/thumbnails/support.png" style="max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);">&amp;lt;/img&gt; </a><br> <h2 class="relative group"><a id="contedo" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#contedo"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Conteúdo </span></h2> <p>A documentação é dividida em cinco partes:</p> <ul><li><p><strong>INÍCIO</strong> contém um tour rápido de instalação e instruções para te dar um empurrão inicial com os 🤗 Transformers.</p></li> <li><p><strong>TUTORIAIS</strong> são perfeitos para começar a aprender sobre a nossa biblioteca. 
Essa seção irá te ajudar a desenvolver habilidades básicas necessárias para usar o 🤗 Transformers.</p></li> <li><p><strong>GUIAS PRÁTICOS</strong> irão te mostrar como alcançar um certo objetivo, como o fine-tuning de um modelo pré-treinado para modelamento de idioma, ou como criar um cabeçalho personalizado para um modelo.</p></li> <li><p><strong>GUIAS CONCEITUAIS</strong> te darão mais discussões e explicações dos conceitos fundamentais e idéias por trás dos modelos, tarefas e da filosofia de design por trás do 🤗 Transformers.</p></li> <li><p><strong>API</strong> descreve o funcionamento de cada classe e função, agrupada em:</p></li> <li><p><strong>CLASSES PRINCIPAIS</strong> para as classes que expõe as APIs importantes da biblioteca.</p></li> <li><p><strong>MODELOS</strong> para as classes e funções relacionadas à cada modelo implementado na biblioteca.</p></li> <li><p><strong>AUXILIARES INTERNOS</strong> para as classes e funções usadas internamente.</p></li></ul> <p>Atualmente a biblioteca contém implementações do PyTorch, TensorFlow e JAX, pesos para modelos pré-treinados e scripts de uso e conversão de utilidades para os seguintes modelos:</p> <h3 class="relative group"><a id="modelos-atuais" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#modelos-atuais"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 
11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Modelos atuais </span></h3> <ol><li><strong><a href="model_doc/albert">ALBERT</a></strong> (from Google Research and the Toyota Technological Institute at Chicago) released with the paper <a href="https://arxiv.org/abs/1909.11942" rel="nofollow">ALBERT: A Lite BERT for Self-supervised Learning of Language Representations</a>, by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.</li> <li><strong><a href="model_doc/bart">BART</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension</a> by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.</li> <li><strong><a href="model_doc/barthez">BARThez</a></strong> (from École polytechnique) released with the paper <a href="https://arxiv.org/abs/2010.12321" rel="nofollow">BARThez: a Skilled Pretrained French Sequence-to-Sequence Model</a> by Moussa Kamal Eddine, Antoine J.-P. 
Tixier, Michalis Vazirgiannis.</li> <li><strong><a href="model_doc/bartpho">BARTpho</a></strong> (from VinAI Research) released with the paper <a href="https://arxiv.org/abs/2109.09701" rel="nofollow">BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese</a> by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.</li> <li><strong><a href="model_doc/beit">BEiT</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2106.08254" rel="nofollow">BEiT: BERT Pre-Training of Image Transformers</a> by Hangbo Bao, Li Dong, Furu Wei.</li> <li><strong><a href="model_doc/bert">BERT</a></strong> (from Google) released with the paper <a href="https://arxiv.org/abs/1810.04805" rel="nofollow">BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding</a> by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.</li> <li><strong><a href="model_doc/bertweet">BERTweet</a></strong> (from VinAI Research) released with the paper <a href="https://aclanthology.org/2020.emnlp-demos.2/" rel="nofollow">BERTweet: A pre-trained language model for English Tweets</a> by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.</li> <li><strong><a href="model_doc/bert-generation">BERT For Sequence Generation</a></strong> (from Google) released with the paper <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.</li> <li><strong><a href="model_doc/big_bird">BigBird-RoBERTa</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2007.14062" rel="nofollow">Big Bird: Transformers for Longer Sequences</a> by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.</li> <li><strong><a href="model_doc/bigbird_pegasus">BigBird-Pegasus</a></strong> (from Google Research) released with the 
paper <a href="https://arxiv.org/abs/2007.14062" rel="nofollow">Big Bird: Transformers for Longer Sequences</a> by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.</li> <li><strong><a href="model_doc/blenderbot">Blenderbot</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2004.13637" rel="nofollow">Recipes for building an open-domain chatbot</a> by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.</li> <li><strong><a href="model_doc/blenderbot-small">BlenderbotSmall</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2004.13637" rel="nofollow">Recipes for building an open-domain chatbot</a> by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.</li> <li><strong><a href="model_doc/bort">BORT</a></strong> (from Alexa) released with the paper <a href="https://arxiv.org/abs/2010.10499" rel="nofollow">Optimal Subarchitecture Extraction For BERT</a> by Adrian de Wynter and Daniel J. 
Perry.</li> <li><strong><a href="model_doc/byt5">ByT5</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2105.13626" rel="nofollow">ByT5: Towards a token-free future with pre-trained byte-to-byte models</a> by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.</li> <li><strong><a href="model_doc/camembert">CamemBERT</a></strong> (from Inria/Facebook/Sorbonne) released with the paper <a href="https://arxiv.org/abs/1911.03894" rel="nofollow">CamemBERT: a Tasty French Language Model</a> by Louis Martin<em>, Benjamin Muller</em>, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.</li> <li><strong><a href="model_doc/canine">CANINE</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2103.06874" rel="nofollow">CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation</a> by Jonathan H. 
Clark, Dan Garrette, Iulia Turc, John Wieting.</li> <li><strong><a href="model_doc/convnext">ConvNeXT</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2201.03545" rel="nofollow">A ConvNet for the 2020s</a> by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.</li> <li><strong><a href="model_doc/clip">CLIP</a></strong> (from OpenAI) released with the paper <a href="https://arxiv.org/abs/2103.00020" rel="nofollow">Learning Transferable Visual Models From Natural Language Supervision</a> by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.</li> <li><strong><a href="model_doc/convbert">ConvBERT</a></strong> (from YituTech) released with the paper <a href="https://arxiv.org/abs/2008.02496" rel="nofollow">ConvBERT: Improving BERT with Span-based Dynamic Convolution</a> by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.</li> <li><strong><a href="model_doc/cpm">CPM</a></strong> (from Tsinghua University) released with the paper <a href="https://arxiv.org/abs/2012.00413" rel="nofollow">CPM: A Large-scale Generative Chinese Pre-trained Language Model</a> by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.</li> <li><strong><a href="model_doc/ctrl">CTRL</a></strong> (from Salesforce) released with the paper <a href="https://arxiv.org/abs/1909.05858" rel="nofollow">CTRL: A Conditional Transformer Language Model for Controllable Generation</a> by Nitish Shirish Keskar<em>, Bryan McCann</em>, Lav R. 
Varshney, Caiming Xiong and Richard Socher.</li> <li><strong><a href="model_doc/data2vec">Data2Vec</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2202.03555" rel="nofollow">Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.</li> <li><strong><a href="model_doc/deberta">DeBERTa</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.</li> <li><strong><a href="model_doc/deberta-v2">DeBERTa-v2</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.</li> <li><strong><a href="model_doc/decision_transformer">Decision Transformer</a></strong> (from Berkeley/Facebook/Google) released with the paper <a href="https://arxiv.org/abs/2106.01345" rel="nofollow">Decision Transformer: Reinforcement Learning via Sequence Modeling</a> by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.</li> <li><strong><a href="model_doc/dit">DiT</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2203.02378" rel="nofollow">DiT: Self-supervised Pre-training for Document Image Transformer</a> by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.</li> <li><strong><a href="model_doc/deit">DeiT</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2012.12877" rel="nofollow">Training data-efficient image transformers &amp; distillation through attention</a> by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre 
Sablayrolles, Hervé Jégou.</li> <li><strong><a href="model_doc/detr">DETR</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2005.12872" rel="nofollow">End-to-End Object Detection with Transformers</a> by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.</li> <li><strong><a href="model_doc/dialogpt">DialoGPT</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/1911.00536" rel="nofollow">DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation</a> by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.</li> <li><strong><a href="model_doc/distilbert">DistilBERT</a></strong> (from HuggingFace), released together with the paper <a href="https://arxiv.org/abs/1910.01108" rel="nofollow">DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter</a> by Victor Sanh, Lysandre Debut and Thomas Wolf. 
The same method has been applied to compress GPT2 into <a href="https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation" rel="nofollow">DistilGPT2</a>, RoBERTa into <a href="https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation" rel="nofollow">DistilRoBERTa</a>, Multilingual BERT into <a href="https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation" rel="nofollow">DistilmBERT</a> and a German version of DistilBERT.</li> <li><strong><a href="model_doc/dpr">DPR</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2004.04906" rel="nofollow">Dense Passage Retrieval for Open-Domain Question Answering</a> by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.</li> <li><strong><a href="master/model_doc/dpt">DPT</a></strong> (from Intel Labs) released with the paper <a href="https://arxiv.org/abs/2103.13413" rel="nofollow">Vision Transformers for Dense Prediction</a> by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.</li> <li><strong><a href="model_doc/encoder-decoder">EncoderDecoder</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.</li> <li><strong><a href="model_doc/electra">ELECTRA</a></strong> (from Google Research/Stanford University) released with the paper <a href="https://arxiv.org/abs/2003.10555" rel="nofollow">ELECTRA: Pre-training text encoders as discriminators rather than generators</a> by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. 
Manning.</li> <li><strong><a href="model_doc/flaubert">FlauBERT</a></strong> (from CNRS) released with the paper <a href="https://arxiv.org/abs/1912.05372" rel="nofollow">FlauBERT: Unsupervised Language Model Pre-training for French</a> by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.</li> <li><strong><a href="model_doc/fnet">FNet</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2105.03824" rel="nofollow">FNet: Mixing Tokens with Fourier Transforms</a> by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.</li> <li><strong><a href="model_doc/funnel">Funnel Transformer</a></strong> (from CMU/Google Brain) released with the paper <a href="https://arxiv.org/abs/2006.03236" rel="nofollow">Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing</a> by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.</li> <li><strong><a href="model_doc/glpn">GLPN</a></strong> (from KAIST) released with the paper <a href="https://arxiv.org/abs/2201.07436" rel="nofollow">Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth</a> by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.</li> <li><strong><a href="model_doc/openai-gpt">GPT</a></strong> (from OpenAI) released with the paper <a href="https://blog.openai.com/language-unsupervised/" rel="nofollow">Improving Language Understanding by Generative Pre-Training</a> by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.</li> <li><strong><a href="model_doc/gpt2">GPT-2</a></strong> (from OpenAI) released with the paper <a href="https://blog.openai.com/better-language-models/" rel="nofollow">Language Models are Unsupervised Multitask Learners</a> by Alec Radford<em>, Jeffrey Wu</em>, Rewon Child, David Luan, Dario Amodei<strong>and Ilya Sutskever</strong>.</li> <li><strong><a 
href="model_doc/gptj">GPT-J</a></strong> (from EleutherAI) released in the repository <a href="https://github.com/kingoflolz/mesh-transformer-jax/" rel="nofollow">kingoflolz/mesh-transformer-jax</a> by Ben Wang and Aran Komatsuzaki.</li> <li><strong><a href="model_doc/gpt_neo">GPT Neo</a></strong> (from EleutherAI) released in the repository <a href="https://github.com/EleutherAI/gpt-neo" rel="nofollow">EleutherAI/gpt-neo</a> by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.</li> <li><strong><a href="model_doc/hubert">Hubert</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2106.07447" rel="nofollow">HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units</a> by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.</li> <li><strong><a href="model_doc/ibert">I-BERT</a></strong> (from Berkeley) released with the paper <a href="https://arxiv.org/abs/2101.01321" rel="nofollow">I-BERT: Integer-only BERT Quantization</a> by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. 
Mahoney, Kurt Keutzer.</li> <li><strong><a href="model_doc/imagegpt">ImageGPT</a></strong> (from OpenAI) released with the paper <a href="https://openai.com/blog/image-gpt/" rel="nofollow">Generative Pretraining from Pixels</a> by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.</li> <li><strong><a href="model_doc/layoutlm">LayoutLM</a></strong> (from Microsoft Research Asia) released with the paper <a href="https://arxiv.org/abs/1912.13318" rel="nofollow">LayoutLM: Pre-training of Text and Layout for Document Image Understanding</a> by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.</li> <li><strong><a href="model_doc/layoutlmv2">LayoutLMv2</a></strong> (from Microsoft Research Asia) released with the paper <a href="https://arxiv.org/abs/2012.14740" rel="nofollow">LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.</li> <li><strong><a href="model_doc/layoutxlm">LayoutXLM</a></strong> (from Microsoft Research Asia) released with the paper <a href="https://arxiv.org/abs/2104.08836" rel="nofollow">LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding</a> by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.</li> <li><strong><a href="model_doc/led">LED</a></strong> (from AllenAI) released with the paper <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer: The Long-Document Transformer</a> by Iz Beltagy, Matthew E. Peters, Arman Cohan.</li> <li><strong><a href="model_doc/longformer">Longformer</a></strong> (from AllenAI) released with the paper <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer: The Long-Document Transformer</a> by Iz Beltagy, Matthew E. 
Peters, Arman Cohan.</li> <li><strong><a href="model_doc/luke">LUKE</a></strong> (from Studio Ousia) released with the paper <a href="https://arxiv.org/abs/2010.01057" rel="nofollow">LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention</a> by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.</li> <li><strong><a href="model_doc/mluke">mLUKE</a></strong> (from Studio Ousia) released with the paper <a href="https://arxiv.org/abs/2110.08151" rel="nofollow">mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models</a> by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.</li> <li><strong><a href="model_doc/lxmert">LXMERT</a></strong> (from UNC Chapel Hill) released with the paper <a href="https://arxiv.org/abs/1908.07490" rel="nofollow">LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering</a> by Hao Tan and Mohit Bansal.</li> <li><strong><a href="model_doc/m2m_100">M2M100</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2010.11125" rel="nofollow">Beyond English-Centric Multilingual Machine Translation</a> by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.</li> <li><strong><a href="model_doc/marian">MarianMT</a></strong> Machine translation models trained using <a href="http://opus.nlpl.eu/" rel="nofollow">OPUS</a> data by Jörg Tiedemann. 
The <a href="https://marian-nmt.github.io/" rel="nofollow">Marian Framework</a> is being developed by the Microsoft Translator Team.</li> <li><strong><a href="model_doc/maskformer">MaskFormer</a></strong> (from Meta and UIUC) released with the paper <a href="https://arxiv.org/abs/2107.06278" rel="nofollow">Per-Pixel Classification is Not All You Need for Semantic Segmentation</a> by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.</li> <li><strong><a href="model_doc/mbart">MBart</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2001.08210" rel="nofollow">Multilingual Denoising Pre-training for Neural Machine Translation</a> by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.</li> <li><strong><a href="model_doc/mbart">MBart-50</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2008.00401" rel="nofollow">Multilingual Translation with Extensible Multilingual Pretraining and Finetuning</a> by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.</li> <li><strong><a href="model_doc/megatron-bert">Megatron-BERT</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/1909.08053" rel="nofollow">Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism</a> by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.</li> <li><strong><a href="model_doc/megatron_gpt2">Megatron-GPT2</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/1909.08053" rel="nofollow">Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism</a> by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.</li> <li><strong><a href="model_doc/mpnet">MPNet</a></strong> (from Microsoft Research) released with the paper <a 
href="https://arxiv.org/abs/2004.09297" rel="nofollow">MPNet: Masked and Permuted Pre-training for Language Understanding</a> by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.</li> <li><strong><a href="model_doc/mt5">MT5</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/2010.11934" rel="nofollow">mT5: A massively multilingual pre-trained text-to-text transformer</a> by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.</li> <li><strong><a href="model_doc/nystromformer">Nyströmformer</a></strong> (from the University of Wisconsin - Madison) released with the paper <a href="https://arxiv.org/abs/2102.03902" rel="nofollow">Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention</a> by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.</li> <li><strong><a href="model_doc/pegasus">Pegasus</a></strong> (from Google) released with the paper <a href="https://arxiv.org/abs/1912.08777" rel="nofollow">PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization</a> by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.</li> <li><strong><a href="model_doc/perceiver">Perceiver IO</a></strong> (from Deepmind) released with the paper <a href="https://arxiv.org/abs/2107.14795" rel="nofollow">Perceiver IO: A General Architecture for Structured Inputs &amp; Outputs</a> by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. 
Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.</li> <li><strong><a href="model_doc/phobert">PhoBERT</a></strong> (from VinAI Research) released with the paper <a href="https://www.aclweb.org/anthology/2020.findings-emnlp.92/" rel="nofollow">PhoBERT: Pre-trained language models for Vietnamese</a> by Dat Quoc Nguyen and Anh Tuan Nguyen.</li> <li><strong><a href="model_doc/plbart">PLBart</a></strong> (from UCLA NLP) released with the paper <a href="https://arxiv.org/abs/2103.06333" rel="nofollow">Unified Pre-training for Program Understanding and Generation</a> by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.</li> <li><strong><a href="model_doc/poolformer">PoolFormer</a></strong> (from Sea AI Labs) released with the paper <a href="https://arxiv.org/abs/2111.11418" rel="nofollow">MetaFormer is Actually What You Need for Vision</a> by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.</li> <li><strong><a href="model_doc/prophetnet">ProphetNet</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2001.04063" rel="nofollow">ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training</a> by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.</li> <li><strong><a href="model_doc/qdqbert">QDQBert</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/2004.09602" rel="nofollow">Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation</a> by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.</li> <li><strong><a href="model_doc/realm.html">REALM</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2002.08909" rel="nofollow">REALM: Retrieval-Augmented Language Model Pre-Training</a> by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei 
Chang.</li> <li><strong><a href="model_doc/reformer">Reformer</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2001.04451" rel="nofollow">Reformer: The Efficient Transformer</a> by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.</li> <li><strong><a href="model_doc/rembert">RemBERT</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2010.12821" rel="nofollow">Rethinking embedding coupling in pre-trained language models</a> by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.</li> <li><strong><a href="model_doc/regnet">RegNet</a></strong> (from META Platforms) released with the paper <a href="https://arxiv.org/abs/2003.13678" rel="nofollow">Designing Network Design Space</a> by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.</li> <li><strong><a href="model_doc/resnet">ResNet</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/1512.03385" rel="nofollow">Deep Residual Learning for Image Recognition</a> by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.</li> <li><strong><a href="model_doc/roberta">RoBERTa</a></strong> (from Facebook), released together with the paper <a href="https://arxiv.org/abs/1907.11692" rel="nofollow">RoBERTa: A Robustly Optimized BERT Pretraining Approach</a> by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.</li> <li><strong><a href="model_doc/roformer">RoFormer</a></strong> (from ZhuiyiTechnology), released together with the paper <a href="https://arxiv.org/abs/2104.09864" rel="nofollow">RoFormer: Enhanced Transformer with Rotary Position Embedding</a> by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.</li> <li><strong><a href="model_doc/segformer">SegFormer</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/2105.15203" 
rel="nofollow">SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers</a> by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.</li> <li><strong><a href="model_doc/sew">SEW</a></strong> (from ASAPP) released with the paper <a href="https://arxiv.org/abs/2109.06870" rel="nofollow">Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition</a> by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.</li> <li><strong><a href="model_doc/sew_d">SEW-D</a></strong> (from ASAPP) released with the paper <a href="https://arxiv.org/abs/2109.06870" rel="nofollow">Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition</a> by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.</li> <li><strong><a href="model_doc/speech_to_text">SpeechToTextTransformer</a></strong> (from Facebook), released together with the paper <a href="https://arxiv.org/abs/2010.05171" rel="nofollow">fairseq S2T: Fast Speech-to-Text Modeling with fairseq</a> by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.</li> <li><strong><a href="model_doc/speech_to_text_2">SpeechToTextTransformer2</a></strong> (from Facebook), released together with the paper <a href="https://arxiv.org/abs/2104.06678" rel="nofollow">Large-Scale Self- and Semi-Supervised Learning for Speech Translation</a> by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.</li> <li><strong><a href="model_doc/splinter">Splinter</a></strong> (from Tel Aviv University), released together with the paper <a href="https://arxiv.org/abs/2101.00438" rel="nofollow">Few-Shot Question Answering by Pretraining Span Selection</a> by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.</li> <li><strong><a href="model_doc/squeezebert">SqueezeBert</a></strong> (from Berkeley) released with the paper <a 
href="https://arxiv.org/abs/2006.11316" rel="nofollow">SqueezeBERT: What can computer vision teach NLP about efficient neural networks?</a> by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.</li> <li><strong><a href="model_doc/swin">Swin Transformer</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2103.14030" rel="nofollow">Swin Transformer: Hierarchical Vision Transformer using Shifted Windows</a> by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.</li> <li><strong><a href="model_doc/t5">T5</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/1910.10683" rel="nofollow">Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer</a> by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.</li> <li><strong><a href="model_doc/t5v1.1">T5v1.1</a></strong> (from Google AI) released in the repository <a href="https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511" rel="nofollow">google-research/text-to-text-transfer-transformer</a> by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. 
Liu.</li> <li><strong><a href="model_doc/tapas">TAPAS</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/2004.02349" rel="nofollow">TAPAS: Weakly Supervised Table Parsing via Pre-training</a> by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.</li> <li><strong><a href="model_doc/tapex">TAPEX</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2107.07653" rel="nofollow">TAPEX: Table Pre-training via Learning a Neural SQL Executor</a> by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.</li> <li><strong><a href="model_doc/transfo-xl">Transformer-XL</a></strong> (from Google/CMU) released with the paper <a href="https://arxiv.org/abs/1901.02860" rel="nofollow">Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context</a> by Zihang Dai<em>, Zhilin Yang</em>, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.</li> <li><strong><a href="model_doc/trocr">TrOCR</a></strong> (from Microsoft), released together with the paper <a href="https://arxiv.org/abs/2109.10282" rel="nofollow">TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models</a> by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.</li> <li><strong><a href="model_doc/unispeech">UniSpeech</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2101.07597" rel="nofollow">UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data</a> by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.</li> <li><strong><a href="model_doc/unispeech-sat">UniSpeechSat</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2110.05752" rel="nofollow">UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE 
PRE-TRAINING</a> by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.</li> <li><strong><a href="model_doc/van">VAN</a></strong> (from Tsinghua University and Nankai University) released with the paper <a href="https://arxiv.org/abs/2202.09741" rel="nofollow">Visual Attention Network</a> by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.</li> <li><strong><a href="model_doc/vilt">ViLT</a></strong> (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper <a href="https://arxiv.org/abs/2102.03334" rel="nofollow">ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision</a> by Wonjae Kim, Bokyung Son, Ildoo Kim.</li> <li><strong><a href="model_doc/vit">Vision Transformer (ViT)</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/2010.11929" rel="nofollow">An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale</a> by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.</li> <li><strong><a href="model_doc/vit_mae">ViTMAE</a></strong> (from Meta AI) released with the paper <a href="https://arxiv.org/abs/2111.06377" rel="nofollow">Masked Autoencoders Are Scalable Vision Learners</a> by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.</li> <li><strong><a href="model_doc/visual_bert">VisualBERT</a></strong> (from UCLA NLP) released with the paper <a href="https://arxiv.org/pdf/1908.03557" rel="nofollow">VisualBERT: A Simple and Performant Baseline for Vision and Language</a> by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.</li> <li><strong><a href="model_doc/wavlm">WavLM</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2110.13900" 
rel="nofollow">WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing</a> by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.</li> <li><strong><a href="model_doc/wav2vec2">Wav2Vec2</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2006.11477" rel="nofollow">wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations</a> by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.</li> <li><strong><a href="model_doc/wav2vec2_phoneme">Wav2Vec2Phoneme</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2109.11680" rel="nofollow">Simple and Effective Zero-shot Cross-lingual Phoneme Recognition</a> by Qiantong Xu, Alexei Baevski, Michael Auli.</li> <li><strong><a href="model_doc/xglm">XGLM</a></strong> (From Facebook AI) released with the paper <a href="https://arxiv.org/abs/2112.10668" rel="nofollow">Few-shot Learning with Multilingual Language Models</a> by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O’Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.</li> <li><strong><a href="model_doc/xlm">XLM</a></strong> (from Facebook) released together with the paper <a href="https://arxiv.org/abs/1901.07291" rel="nofollow">Cross-lingual Language Model Pretraining</a> by Guillaume Lample and Alexis Conneau.</li> <li><strong><a href="model_doc/xlm-prophetnet">XLM-ProphetNet</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2001.04063" rel="nofollow">ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training</a> by Yu Yan, 
Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.</li> <li><strong><a href="model_doc/xlm-roberta">XLM-RoBERTa</a></strong> (from Facebook AI), released together with the paper <a href="https://arxiv.org/abs/1911.02116" rel="nofollow">Unsupervised Cross-lingual Representation Learning at Scale</a> by Alexis Conneau<em>, Kartikay Khandelwal</em>, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.</li> <li><strong><a href="model_doc/xlm-roberta-xl">XLM-RoBERTa-XL</a></strong> (from Facebook AI), released together with the paper <a href="https://arxiv.org/abs/2105.00572" rel="nofollow">Larger-Scale Transformers for Multilingual Masked Language Modeling</a> by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.</li> <li><strong><a href="model_doc/xlnet">XLNet</a></strong> (from Google/CMU) released with the paper <a href="https://arxiv.org/abs/1906.08237" rel="nofollow">​XLNet: Generalized Autoregressive Pretraining for Language Understanding</a> by Zhilin Yang<em>, Zihang Dai</em>, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. 
Le.</li> <li><strong><a href="model_doc/xlsr_wav2vec2">XLSR-Wav2Vec2</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2006.13979" rel="nofollow">Unsupervised Cross-Lingual Representation Learning For Speech Recognition</a> by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.</li> <li><strong><a href="model_doc/xls_r">XLS-R</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2111.09296" rel="nofollow">XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale</a> by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.</li> <li><strong><a href="model_doc/yoso">YOSO</a></strong> (from the University of Wisconsin - Madison) released with the paper <a href="https://arxiv.org/abs/2111.09714" rel="nofollow">You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling</a> by Zhanpeng Zeng, Yunyang Xiong, Sathya N. 
Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.</li></ol> <h3 class="relative group"><a id="frameworks-aceitos" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#frameworks-aceitos"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Frameworks aceitos </span></h3> <p>A tabela abaixo representa a lista de suporte na biblioteca para cada um dos seguintes modelos, caso tenham um tokenizer do Python (chamado de “slow”), ou um tokenizer construído em cima da biblioteca 🤗 Tokenizers (chamado de “fast”). 
Além disso, são diferenciados pelo suporte em diferentes frameworks: JAX (por meio do Flax); PyTorch; e/ou Tensorflow.</p> <table><thead><tr><th align="center">Model</th> <th align="center">Tokenizer slow</th> <th align="center">Tokenizer fast</th> <th align="center">PyTorch support</th> <th align="center">TensorFlow support</th> <th align="center">Flax Support</th></tr></thead> <tbody><tr><td align="center">ALBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">BART</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">BEiT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">BERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Bert Generation</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">BigBird</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">BigBirdPegasus</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Blenderbot</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">BlenderbotSmall</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">CamemBERT</td> <td 
align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Canine</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">CLIP</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">ConvBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">ConvNext</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">CTRL</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Data2VecAudio</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Data2VecText</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Data2VecVision</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">DeBERTa</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">DeBERTa-v2</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Decision Transformer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td 
align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">DeiT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">DETR</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">DistilBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">DPR</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">DPT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ELECTRA</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Encoder decoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">FairSeq Machine-Translation</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">FlauBERT</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">FNet</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Funnel Transformer</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">GLPN</td> <td 
align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">GPT Neo</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">GPT-J</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Hubert</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">I-BERT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ImageGPT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">LayoutLM</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">LayoutLMv2</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">LED</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Longformer</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">LUKE</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">LXMERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td 
align="center">❌</td></tr> <tr><td align="center">M2M100</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Marian</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">MaskFormer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">mBART</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">MegatronBert</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">MobileBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">MPNet</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">mT5</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Nystromformer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">OpenAI GPT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">OpenAI GPT-2</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Pegasus</td> <td align="center">✅</td> <td 
align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Perceiver</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">PLBart</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">PoolFormer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ProphetNet</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">QDQBert</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">RAG</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Realm</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Reformer</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">RegNet</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">RemBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">ResNet</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td 
align="center">RetriBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">RoBERTa</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">RoFormer</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">SegFormer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">SEW</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">SEW-D</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Speech Encoder decoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">Speech2Text</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Speech2Text2</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Splinter</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">SqueezeBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Swin</td> <td align="center">❌</td> <td align="center">❌</td> <td 
align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">T5</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">TAPAS</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">TAPEX</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Transformer-XL</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">TrOCR</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">UniSpeech</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">UniSpeechSat</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">VAN</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ViLT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Vision Encoder decoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">VisionTextDualEncoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td 
align="center">VisualBert</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ViT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">ViTMAE</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Wav2Vec2</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">WavLM</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">XGLM</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">XLM</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">XLM-RoBERTa</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">XLM-RoBERTa-XL</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">XLMProphetNet</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">XLNet</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">YOSO</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td 
align="center">❌</td> <td align="center">❌</td></tr></tbody></table> <script type="module" data-hydrate="4oquw5"> import { start } from "/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="4oquw5"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/pt","assets":"/docs/transformers/pr_18789/pt"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/pt/_app/pages/index.mdx-hf-doc-builder.js") ], params: {} } }); </script>
462
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/accelerate.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;treinamento-distribudo-com-o-accelerate&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;configurao&quot;,&quot;title&quot;:&quot;Configuração&quot;},{&quot;local&quot;:&quot;preparando-a-acelerao&quot;,&quot;title&quot;:&quot;Preparando a aceleração&quot;},{&quot;local&quot;:&quot;backward&quot;,&quot;title&quot;:&quot;Backward&quot;},{&quot;local&quot;:&quot;treinamento&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;treinamento-em-um-script&quot;,&quot;title&quot;:&quot;Treinamento em um Script&quot;},{&quot;local&quot;:&quot;treinamento-em-um-notebook&quot;,&quot;title&quot;:&quot;Treinamento em um Notebook&quot;}],&quot;title&quot;:&quot;Treinamento&quot;}],&quot;title&quot;:&quot;Treinamento distribuído com o 🤗 Accelerate&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/accelerate.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/CodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="treinamento-distribudo-com-o-accelerate" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#treinamento-distribudo-com-o-accelerate"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Treinamento distribuído com o 🤗 Accelerate </span></h1> <p>O paralelismo surgiu como uma estratégia para treinar modelos grandes em hardware limitado e aumentar a velocidade de treinamento em várias órdens de magnitude. Na Hugging Face criamos a biblioteca <a href="https://huggingface.co/docs/accelerate" rel="nofollow">🤗 Accelerate</a> para ajudar os usuários a treinar modelos 🤗 Transformers com qualquer configuração distribuída, seja em uma máquina com múltiplos GPUs ou em múltiplos GPUs distribuidos entre muitas máquinas. 
Neste tutorial, você irá aprender como personalizar seu laço de treinamento de PyTorch para poder treinar em ambientes distribuídos.</p> <h2 class="relative group"><a id="configurao" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#configurao"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Configuração </span></h2> <p>De início, instale o 🤗 Accelerate:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute 
pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install accelerate<!-- HTML_TAG_END --></pre></div> <p>Logo, devemos importar e criar um objeto <a href="https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator" rel="nofollow"><code>Accelerator</code></a>. O <code>Accelerator</code> detectará automáticamente a configuração distribuída disponível e inicializará todos os componentes necessários para o treinamento. Não há necessidade portanto de especificar o dispositivo onde deve colocar seu modelo.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> accelerate <span class="hljs-keyword">import</span> Accelerator <span class="hljs-meta">&gt;&gt;&gt; </span>accelerator = Accelerator()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="preparando-a-acelerao" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preparando-a-acelerao"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preparando a aceleração </span></h2> <p>Passe todos os objetos relevantes ao treinamento para o método <a href="https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare" rel="nofollow"><code>prepare</code></a>. 
Isto inclui os DataLoaders de treino e evaluação, um modelo e um otimizador:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( <span class="hljs-meta">... </span> train_dataloader, eval_dataloader, model, optimizer <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="backward" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#backward"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Backward </span></h2> <p>Por último, substitua o <code>loss.backward()</code> padrão em seu laço de treinamento com o método <a href="https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.backward" rel="nofollow"><code>backward</code></a> do 🤗 Accelerate:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path 
d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> epoch <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_epochs): <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_dataloader: <span class="hljs-meta">... </span> outputs = model(**batch) <span class="hljs-meta">... </span> loss = outputs.loss <span class="hljs-meta">... </span> accelerator.backward(loss) <span class="hljs-meta">... </span> optimizer.step() <span class="hljs-meta">... </span> lr_scheduler.step() <span class="hljs-meta">... </span> optimizer.zero_grad() <span class="hljs-meta">... 
</span> progress_bar.update(<span class="hljs-number">1</span>)<!-- HTML_TAG_END --></pre></div> <p>Como se poder ver no seguinte código, só precisará adicionar quatro linhas de código ao seu laço de treinamento para habilitar o treinamento distribuído!</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-addition">+ from accelerate import Accelerator</span> from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler <span class="hljs-addition">+ accelerator = Accelerator()</span> model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) <span class="hljs-deletion">- device = torch.device(&quot;cuda&quot;) if torch.cuda.is_available() else torch.device(&quot;cpu&quot;)</span> <span class="hljs-deletion">- 
model.to(device)</span> <span class="hljs-addition">+ train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(</span> <span class="hljs-addition">+ train_dataloader, eval_dataloader, model, optimizer</span> <span class="hljs-addition">+ )</span> num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( &quot;linear&quot;, optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: <span class="hljs-deletion">- batch = {k: v.to(device) for k, v in batch.items()}</span> outputs = model(**batch) loss = outputs.loss <span class="hljs-deletion">- loss.backward()</span> <span class="hljs-addition">+ accelerator.backward(loss)</span> optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="treinamento" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#treinamento"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Treinamento </span></h2> 
<p>Quando tiver adicionado as linhas de código relevantes, inicie o treinamento por um script ou notebook como o Colab.</p> <h3 class="relative group"><a id="treinamento-em-um-script" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#treinamento-em-um-script"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Treinamento em um Script </span></h3> <p>Se estiver rodando seu treinamento em um Script, execute o seguinte comando para criar e guardar um arquivo de configuração:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->accelerate config<!-- HTML_TAG_END --></pre></div> <p>Comece o treinamento com:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->accelerate launch train.py<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="treinamento-em-um-notebook" class="header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#treinamento-em-um-notebook"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Treinamento em um Notebook </span></h3> <p>O 🤗 Accelerate pode rodar em um notebook, por exemplo, se estiver planejando usar as TPUs do Google Colab. 
Encapsule o código responsável pelo treinamento em uma função e passe-o ao <code>notebook_launcher</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> accelerate <span class="hljs-keyword">import</span> notebook_launcher <span class="hljs-meta">&gt;&gt;&gt; </span>notebook_launcher(training_function)<!-- HTML_TAG_END --></pre></div> <p>Para obter mais informações sobre o 🤗 Accelerate e suas numerosas funções, consulte a <a href="https://huggingface.co/docs/accelerate/index" rel="nofollow">documentação</a>.</p> <script type="module" data-hydrate="hdow02"> import { start } from "/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="hdow02"]').parentNode, paths: 
{"base":"/docs/transformers/pr_18789/pt","assets":"/docs/transformers/pr_18789/pt"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/pt/_app/pages/accelerate.mdx-hf-doc-builder.js") ], params: {} } }); </script>
463
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/training.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;finetuning-de-um-modelo-prtreinado&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;preparando-um-dataset&quot;,&quot;title&quot;:&quot;Preparando um dataset&quot;},{&quot;local&quot;:&quot;finetuning-com-o-trainer&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;hiperparmetros-de-treinamento&quot;,&quot;title&quot;:&quot;Hiperparâmetros de treinamento&quot;},{&quot;local&quot;:&quot;mtricas&quot;,&quot;title&quot;:&quot;Métricas&quot;},{&quot;local&quot;:&quot;trainer&quot;,&quot;title&quot;:&quot;Trainer&quot;}],&quot;title&quot;:&quot;Fine-tuning com o `Trainer`&quot;},{&quot;local&quot;:&quot;finetuning-com-keras&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;converso-do-dataset-ao-formato-do-tensorflow&quot;,&quot;title&quot;:&quot;Conversão do dataset ao formato do TensorFlow&quot;},{&quot;local&quot;:&quot;compilao-e-ajustes&quot;,&quot;title&quot;:&quot;Compilação e ajustes&quot;}],&quot;title&quot;:&quot;Fine-tuning com Keras&quot;},{&quot;local&quot;:&quot;finetune-em-pytorch-nativo&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;dataloader&quot;,&quot;title&quot;:&quot;DataLoader&quot;},{&quot;local&quot;:&quot;otimizao-e-configurao-do-learning-rate&quot;,&quot;title&quot;:&quot;Otimização e configuração do Learning Rate&quot;},{&quot;local&quot;:&quot;ciclo-de-treinamento&quot;,&quot;title&quot;:&quot;Ciclo de treinamento&quot;},{&quot;local&quot;:&quot;mtricas&quot;,&quot;title&quot;:&quot;Métricas&quot;}],&quot;title&quot;:&quot;Fine-tune em PyTorch nativo&quot;},{&quot;local&quot;:&quot;recursos-adicionais&quot;,&quot;title&quot;:&quot;Recursos adicionais&quot;}],&quot;title&quot;:&quot;Fine-tuning de um modelo pré-treinado&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link 
rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/training.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/DocNotebookDropdown-hf-doc-builder.js"> <h1 class="relative group"><a id="finetuning-de-um-modelo-prtreinado" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetuning-de-um-modelo-prtreinado"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tuning de um modelo pré-treinado </span></h1> <div class="flex space-x-1 absolute z-10 right-0 top-0"> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Colab" class="!m-0" src="https://colab.research.google.com/assets/colab-badge.svg"> </button> </div> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Studio Lab" class="!m-0" src="https://studiolab.sagemaker.aws/studiolab.svg"> </button> </div></div> <p>O uso de um modelo pré-treinado tem importantes vantagens. Redução do custo computacional, a pegada de carbono, e te permite utilizar modelos de última geração sem ter que treinar um novo desde o início. O 🤗 Transformers proporciona acesso a milhares de modelos pré-treinados numa ampla gama de tarefas. Quando utilizar um modelo pré-treinado, treine-o com um dataset específico para a sua tarefa. Isto é chamado de fine-tuning, uma técnica de treinamento incrivelmente poderosa. 
Neste tutorial faremos o fine-tuning de um modelo pré-treinado com um framework de Deep Learning da sua escolha:</p> <ul><li>Fine-tuning de um modelo pré-treinado com o 🤗 Transformers <code>Trainer</code>.</li> <li>Fine-tuning de um modelo pré-treinado no TensorFlow com o Keras.</li> <li>Fine-tuning de um modelo pré-treinado em PyTorch nativo.</li></ul> <a id="data-processing"></a> <h2 class="relative group"><a id="preparando-um-dataset" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#preparando-um-dataset"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Preparando um dataset </span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/_BZearw7f0w" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Antes de aplicar o fine-tuning a um modelo pré-treinado, baixe um dataset e prepare-o para o treinamento. 
O tutorial anterior ensinará a processar os dados para o treinamento, e então poderá ter a oportunidade de testar esse novo conhecimento em algo prático.</p> <p>Comece carregando o dataset <a href="https://huggingface.co/datasets/yelp_review_full" rel="nofollow">Yelp Reviews</a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;yelp_review_full&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset[<span class="hljs-number">100</span>] {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;text&#x27;</span>: <span 
class="hljs-string">&#x27;My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\&#x27;s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\&#x27;s meal. After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\&quot;serving off their orders\\&quot; when they didn\&#x27;t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\&#x27;t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\&#x27;ve eaten at various McDonalds restaurants for over 30 years. I\&#x27;ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!&#x27;</span>}<!-- HTML_TAG_END --></pre></div> <p>Como já sabe, é necessário ter um tokenizador para processar o texto e incluir uma estratégia de padding e truncamento, para manejar qualquer tamanho varíavel de sequência. 
Para processar o seu dataset em apenas um passo, utilize o método de 🤗 Datasets <a href="https://huggingface.co/docs/datasets/process.html#map" rel="nofollow"><code>map</code></a> para aplicar uma função de preprocessamento sobre todo o dataset.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">tokenize_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> tokenizer(examples[<span class="hljs-string">&quot;text&quot;</span>], padding=<span class="hljs-string">&quot;max_length&quot;</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets = dataset.<span class="hljs-built_in">map</span>(tokenize_function, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Se desejar, é possível criar um subconjunto menor do dataset completo para aplicar o fine-tuning e assim reduzir o tempo necessário.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>small_train_dataset = tokenized_datasets[<span class="hljs-string">&quot;train&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span 
class="hljs-built_in">range</span>(<span class="hljs-number">1000</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>small_eval_dataset = tokenized_datasets[<span class="hljs-string">&quot;test&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span class="hljs-built_in">range</span>(<span class="hljs-number">1000</span>))<!-- HTML_TAG_END --></pre></div> <a id="trainer"></a> <h2 class="relative group"><a id="finetuning-com-o-trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetuning-com-o-trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tuning com o <code>Trainer</code></span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/nvBXf7s7vTI" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>O 🤗 Transformers proporciona uma classe <code>Trainer</code> otimizada para o treinamento de modelos de 🤗 Transformers, facilitando os primeiros passos do treinamento sem a necessidade de escrever manualmente o seu próprio ciclo. 
A API do <code>Trainer</code> suporta um grande conjunto de opções de treinamento e funcionalidades, como o logging, o gradient accumulation e o mixed precision.</p> <p>Comece carregando seu modelo e especifique o número de labels de previsão. A partir do <a href="https://huggingface.co/datasets/yelp_review_full#data-fields" rel="nofollow">Dataset Card</a> do Yelp Review, que já sabemos ter 5 labels, usamos o seguinte código:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, num_labels=<span 
class="hljs-number">5</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Você verá um alerta sobre alguns pesos pré-treinados que não estão sendo utilizados e que alguns pesos estão sendo inicializados aleatoriamente. Não se preocupe, essa mensagem é completamente normal. O header/cabeçário pré-treinado do modelo BERT é descartado e substitui-se por um header de classificação inicializado aleatoriamente. Assim, pode aplicar o fine-tuning a este novo header do modelo em sua tarefa de classificação de sequências fazendo um transfer learning do modelo pré-treinado.</p></div> <h3 class="relative group"><a id="hiperparmetros-de-treinamento" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#hiperparmetros-de-treinamento"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Hiperparâmetros de treinamento </span></h3> <p>Em seguida, crie uma classe <code>TrainingArguments</code> que contenha todos os hiperparâmetros 
que possam ser ajustados, assim como os indicadores para ativar as diferentes opções de treinamento. Para este tutorial, você pode começar o treinamento usando os <a href="https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments" rel="nofollow">hiperparâmetros</a> padrão, porém, sinta-se livre para experimentar com eles e encontrar uma configuração ótima.</p> <p>Especifique onde salvar os checkpoints do treinamento:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrainingArguments <span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments(output_dir=<span class="hljs-string">&quot;test_trainer&quot;</span>)<!-- HTML_TAG_END --></pre></div> <h3 
class="relative group"><a id="mtricas" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#mtricas"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Métricas </span></h3> <p>O <code>Trainer</code> não avalia automaticamente o rendimento do modelo durante o treinamento. Será necessário passar ao <code>Trainer</code> uma função para calcular e fazer um diagnóstico sobre as métricas. 
A biblioteca 🤗 Datasets proporciona uma função de <a href="https://huggingface.co/metrics/accuracy" rel="nofollow"><code>accuracy</code></a> simples que pode ser carregada com a função <code>load_metric</code> (ver este <a href="https://huggingface.co/docs/datasets/metrics.html" rel="nofollow">tutorial</a> para mais informações):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_metric <span class="hljs-meta">&gt;&gt;&gt; </span>metric = load_metric(<span class="hljs-string">&quot;accuracy&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Defina a função 
<code>compute</code> dentro de <code>metric</code> para calcular a precisão das suas predições. Antes de passar as suas predições ao <code>compute</code>, é necessário converter as predições à logits (lembre-se que todos os modelos de 🤗 Transformers retornam logits).</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">compute_metrics</span>(<span class="hljs-params">eval_pred</span>): <span class="hljs-meta">... </span> logits, labels = eval_pred <span class="hljs-meta">... </span> predictions = np.argmax(logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> metric.compute(predictions=predictions, references=labels)<!-- HTML_TAG_END --></pre></div> <p>Se quiser controlar as suas métricas de avaliação durante o fine-tuning, especifique o parâmetro <code>evaluation_strategy</code> nos seus argumentos de treinamento para que o modelo considere a métrica de avaliação ao final de cada época:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrainingArguments <span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments(output_dir=<span class="hljs-string">&quot;test_trainer&quot;</span>, evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>)<!-- HTML_TAG_END --></pre></div> <h3 
class="relative group"><a id="trainer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trainer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Trainer </span></h3> <p>Crie um objeto <code>Trainer</code> com o seu modelo, argumentos de treinamento, conjuntos de dados de treinamento e de teste, e a sua função de avaliação:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity 
bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=small_train_dataset, <span class="hljs-meta">... </span> eval_dataset=small_eval_dataset, <span class="hljs-meta">... </span> compute_metrics=compute_metrics, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Em seguida, aplique o fine-tuning a seu modelo chamado <code>train()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <a id="keras"></a> <h2 class="relative group"><a id="finetuning-com-keras" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetuning-com-keras"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tuning com Keras </span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/rnTGBy2ax1c" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Os modelos de 🤗 Transformers também permitem realizar o treinamento com o TensorFlow com a API do Keras. 
Contudo, será necessário fazer algumas mudanças antes de realizar o fine-tuning.</p> <h3 class="relative group"><a id="converso-do-dataset-ao-formato-do-tensorflow" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#converso-do-dataset-ao-formato-do-tensorflow"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Conversão do dataset ao formato do TensorFlow </span></h3> <p>O <code>DefaultDataCollator</code> junta os tensores em um batch para que o modelo possa ser treinado em cima deles. 
Assegure-se de especificar os <code>return_tensors</code> para retornar os tensores do TensorFlow:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator(return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>O <code>Trainer</code> utiliza <code>DataCollatorWithPadding</code> por padrão, então você não precisa especificar explicitamente 
um colador de dados (data collator).</p></div> <p>Em seguida, converta os datasets tokenizados em datasets do TensorFlow com o método <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset" rel="nofollow"><code>to_tf_dataset</code></a>. Especifique suas entradas em <code>columns</code> e seu rótulo em <code>label_cols</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_dataset = small_train_dataset.to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;token_type_ids&quot;</span>], <span class="hljs-meta">... 
</span> label_cols=[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">8</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_dataset = small_eval_dataset.to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;token_type_ids&quot;</span>], <span class="hljs-meta">... </span> label_cols=[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">8</span>, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="compilao-e-ajustes" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#compilao-e-ajustes"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Compilação e ajustes </span></h3> <p>Carregue um modelo do TensorFlow com o número esperado de rótulos:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black 
text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, num_labels=<span class="hljs-number">5</span>)<!-- HTML_TAG_END --></pre></div> <p>A seguir, compile e ajuste o fine-tuning a seu modelo com <a href="https://keras.io/api/models/model_training_apis/" rel="nofollow"><code>fit</code></a> como faria com qualquer outro modelo do Keras:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight 
rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>( <span class="hljs-meta">... </span> optimizer=tf.keras.optimizers.Adam(learning_rate=<span class="hljs-number">5e-5</span>), <span class="hljs-meta">... </span> loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=<span class="hljs-literal">True</span>), <span class="hljs-meta">... </span> metrics=tf.metrics.SparseCategoricalAccuracy(), <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> <a id="pytorch_native"></a> <h2 class="relative group"><a id="finetune-em-pytorch-nativo" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#finetune-em-pytorch-nativo"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Fine-tune em PyTorch nativo </span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/Dh9CL8fyG80" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>O <code>Trainer</code> se encarrega do ciclo de treinamento e permite aplicar o fine-tuning a um modelo em uma linha de código apenas. Para os usuários que preferirem escrever o seu próprio ciclo de treinamento, também é possível aplicar o fine-tuning a um modelo de 🤗 Transformers em PyTorch nativo.</p> <p>Neste momento, talvez ocorra a necessidade de reinicar seu notebook ou executar a seguinte linha de código para liberar memória:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> 
Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-keyword">del</span> model <span class="hljs-keyword">del</span> pytorch_model <span class="hljs-keyword">del</span> trainer torch.cuda.empty_cache()<!-- HTML_TAG_END --></pre></div> <p>Em sequência, faremos um post-processing manual do <code>tokenized_dataset</code> e assim prepará-lo para o treinamento.</p> <ol><li><p>Apague a coluna de <code>text</code> porque o modelo não aceita texto cru como entrada:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets = tokenized_datasets.remove_columns([<span class="hljs-string">&quot;text&quot;</span>])<!-- HTML_TAG_END --></pre></div></li> <li><p>Troque o nome da coluna <code>label</code> para <code>labels</code>, pois o modelo espera um argumento de mesmo 
nome:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets = tokenized_datasets.rename_column(<span class="hljs-string">&quot;label&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>)<!-- HTML_TAG_END --></pre></div></li> <li><p>Defina o formato do dataset para retornar tensores do PyTorch no lugar de listas:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets.set_format(<span class="hljs-string">&quot;torch&quot;</span>)<!-- HTML_TAG_END --></pre></div></li></ol> <p>Em sequência, crie um subconjunto menor do dataset, como foi mostrado anteriormente, para acelerá-lo o fine-tuning.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute 
bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>small_train_dataset = tokenized_datasets[<span class="hljs-string">&quot;train&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span class="hljs-built_in">range</span>(<span class="hljs-number">1000</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>small_eval_dataset = tokenized_datasets[<span class="hljs-string">&quot;test&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span class="hljs-built_in">range</span>(<span class="hljs-number">1000</span>))<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="dataloader" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#dataloader"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>DataLoader </span></h3> <p>Crie um <code>DataLoader</code> para os seus datasets de treinamento e de teste para poder iterar sobre batches de dados:</p> <div class="code-block 
relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torch.utils.data <span class="hljs-keyword">import</span> DataLoader <span class="hljs-meta">&gt;&gt;&gt; </span>train_dataloader = DataLoader(small_train_dataset, shuffle=<span class="hljs-literal">True</span>, batch_size=<span class="hljs-number">8</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>eval_dataloader = DataLoader(small_eval_dataset, batch_size=<span class="hljs-number">8</span>)<!-- HTML_TAG_END --></pre></div> <p>Carregue seu modelo com o número de labels esperados:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 
" title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, num_labels=<span class="hljs-number">5</span>)<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="otimizao-e-configurao-do-learning-rate" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#otimizao-e-configurao-do-learning-rate"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 
0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Otimização e configuração do Learning Rate </span></h3> <p>Crie um otimizador e um learning rate para aplicar o fine-tuning ao modelo. Iremos utilizar o otimizador <a href="https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html" rel="nofollow"><code>AdamW</code></a> do PyTorch:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> torch.optim <span class="hljs-keyword">import</span> AdamW <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamW(model.parameters(), lr=<span class="hljs-number">5e-5</span>)<!-- HTML_TAG_END --></pre></div> <p>Defina o learning rate do <code>Trainer</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> get_scheduler <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_training_steps = num_epochs * <span class="hljs-built_in">len</span>(train_dataloader) <span class="hljs-meta">&gt;&gt;&gt; </span>lr_scheduler = get_scheduler( <span class="hljs-meta">... 
</span> name=<span class="hljs-string">&quot;linear&quot;</span>, optimizer=optimizer, num_warmup_steps=<span class="hljs-number">0</span>, num_training_steps=num_training_steps <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Por último, especifique o <code>device</code> do ambiente para utilizar uma GPU se tiver acesso à alguma. Caso contrário, o treinamento em uma CPU pode acabar levando várias horas em vez de minutos.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>device = torch.device(<span class="hljs-string">&quot;cuda&quot;</span>) <span class="hljs-keyword">if</span> torch.cuda.is_available() <span class="hljs-keyword">else</span> torch.device(<span 
class="hljs-string">&quot;cpu&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.to(device)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Se necessário, você pode obter o acesso gratuito a uma GPU na núvem por meio de um notebook no <a href="https://colab.research.google.com/" rel="nofollow">Colaboratory</a> ou <a href="https://studiolab.sagemaker.aws/" rel="nofollow">SageMaker StudioLab</a> se não tiver esse recurso de forma local.</p></div> <p>Perfeito, agora estamos prontos para começar o treinamento! 🥳</p> <h3 class="relative group"><a id="ciclo-de-treinamento" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#ciclo-de-treinamento"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Ciclo de treinamento </span></h3> <p>Para visualizar melhor o processo de treinamento, utilize a biblioteca <a href="https://tqdm.github.io/" rel="nofollow">tqdm</a> para adicionar uma barra de progresso sobre o 
número de passos percorridos no treinamento atual:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tqdm.auto <span class="hljs-keyword">import</span> tqdm <span class="hljs-meta">&gt;&gt;&gt; </span>progress_bar = tqdm(<span class="hljs-built_in">range</span>(num_training_steps)) <span class="hljs-meta">&gt;&gt;&gt; </span>model.train() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> epoch <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_epochs): <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_dataloader: <span class="hljs-meta">... 
</span> batch = {k: v.to(device) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> batch.items()} <span class="hljs-meta">... </span> outputs = model(**batch) <span class="hljs-meta">... </span> loss = outputs.loss <span class="hljs-meta">... </span> loss.backward() <span class="hljs-meta">... </span> optimizer.step() <span class="hljs-meta">... </span> lr_scheduler.step() <span class="hljs-meta">... </span> optimizer.zero_grad() <span class="hljs-meta">... </span> progress_bar.update(<span class="hljs-number">1</span>)<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="mtricas" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#mtricas"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Métricas </span></h3> <p>Da mesma forma que é necessário adicionar uma função de avaliação ao <code>Trainer</code>, é necessário fazer o mesmo quando escrevendo o próprio ciclo de treinamento. 
Contudo, em vez de calcular e retornar a métrica final de cada época, você deverá adicionar todos os batches com <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=add_batch#datasets.Metric.add_batch" rel="nofollow"><code>add_batch</code></a> e calcular a métrica apenas no final.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>metric = load_metric(<span class="hljs-string">&quot;accuracy&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">eval</span>() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> eval_dataloader: <span class="hljs-meta">... 
</span> batch = {k: v.to(device) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> batch.items()} <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(**batch) <span class="hljs-meta">... </span> logits = outputs.logits <span class="hljs-meta">... </span> predictions = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">... </span> metric.add_batch(predictions=predictions, references=batch[<span class="hljs-string">&quot;labels&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>metric.compute()<!-- HTML_TAG_END --></pre></div> <a id="additional-resources"></a> <h2 class="relative group"><a id="recursos-adicionais" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#recursos-adicionais"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Recursos adicionais </span></h2> <p>Para mais exemplos de fine-tuning acesse:</p> <ul><li><p><a href="https://github.com/huggingface/transformers/tree/main/examples" rel="nofollow">🤗 Transformers Examples</a> inclui scripts para treinas 
tarefas comuns de NLP em PyTorch e TensorFlow.</p></li> <li><p><a href="notebooks">🤗 Transformers Notebooks</a> contém vários notebooks sobre como aplicar o fine-tuning a um modelo para tarefas específicas no PyTorch e TensorFlow.</p></li></ul> <script type="module" data-hydrate="12l0db5"> import { start } from "/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="12l0db5"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/pt","assets":"/docs/transformers/pr_18789/pt"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/pt/_app/pages/training.mdx-hf-doc-builder.js") ], params: {} } }); </script>
464
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/create_a_model.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;criar-uma-arquitetura-customizada&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;configuration&quot;,&quot;title&quot;:&quot;configuration&quot;},{&quot;local&quot;:&quot;modelo&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;heads-do-modelo&quot;,&quot;title&quot;:&quot;Heads do modelo&quot;}],&quot;title&quot;:&quot;Modelo&quot;},{&quot;local&quot;:&quot;tokenizer&quot;,&quot;title&quot;:&quot;Tokenizer&quot;},{&quot;local&quot;:&quot;extrator-de-features&quot;,&quot;title&quot;:&quot;Extrator de features&quot;},{&quot;local&quot;:&quot;processor&quot;,&quot;title&quot;:&quot;Processor&quot;}],&quot;title&quot;:&quot;Criar uma arquitetura customizada&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/create_a_model.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="criar-uma-arquitetura-customizada" class="header-link block 
pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#criar-uma-arquitetura-customizada"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Criar uma arquitetura customizada </span></h1> <p>Uma <a href="model_doc/auto"><code>AutoClass</code></a> automaticamente infere a arquitetura do modelo e baixa configurações e pesos pré-treinados. Geralmente, nós recomendamos usar uma <code>AutoClass</code> para produzir um código independente de checkpoints. Mas usuários que querem mais contole sobre parâmetros específicos do modelo pode criar um modelo customizado 🤗 Transformers a partir de algumas classes bases. Isso pode ser particulamente útil para alguém que está interessado em estudar, treinar ou fazer experimentos com um modelo 🤗 Transformers. Nesse tutorial, será explicado como criar um modelo customizado sem uma <code>AutoClass</code>. 
Aprenda como:</p> <ul><li>Carregar e customizar a configuração de um modelo.</li> <li>Criar a arquitetura de um modelo.</li> <li>Criar um tokenizer rápido e devagar para textos.</li> <li>Criar extrator de features para tarefas envolvendo audio e imagem.</li> <li>Criar um processador para tarefas multimodais.</li></ul> <h2 class="relative group"><a id="configuration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#configuration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>configuration </span></h2> <p>A <a href="main_classes/configuration">configuration</a> refere-se a atributos específicos de um modelo. Cada configuração de modelo tem atributos diferentes; por exemplo, todos modelo de PLN possuem os atributos <code>hidden_size</code>, <code>num_attention_heads</code>, <code>num_hidden_layers</code> e <code>vocab_size</code> em comum. 
Esse atributos especificam o numero de ‘attention heads’ ou ‘hidden layers’ para construir um modelo.</p> <p>Dê uma olhada a mais em <a href="model_doc/distilbert">DistilBERT</a> acessando <code>DistilBertConfig</code> para observar esses atributos:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = DistilBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(config) DistilBertConfig { <span class="hljs-string">&quot;activation&quot;</span>: <span class="hljs-string">&quot;gelu&quot;</span>, <span class="hljs-string">&quot;attention_dropout&quot;</span>: <span class="hljs-number">0.1</span>, 
<span class="hljs-string">&quot;dim&quot;</span>: <span class="hljs-number">768</span>, <span class="hljs-string">&quot;dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;hidden_dim&quot;</span>: <span class="hljs-number">3072</span>, <span class="hljs-string">&quot;initializer_range&quot;</span>: <span class="hljs-number">0.02</span>, <span class="hljs-string">&quot;max_position_embeddings&quot;</span>: <span class="hljs-number">512</span>, <span class="hljs-string">&quot;model_type&quot;</span>: <span class="hljs-string">&quot;distilbert&quot;</span>, <span class="hljs-string">&quot;n_heads&quot;</span>: <span class="hljs-number">12</span>, <span class="hljs-string">&quot;n_layers&quot;</span>: <span class="hljs-number">6</span>, <span class="hljs-string">&quot;pad_token_id&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;qa_dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;seq_classif_dropout&quot;</span>: <span class="hljs-number">0.2</span>, <span class="hljs-string">&quot;sinusoidal_pos_embds&quot;</span>: false, <span class="hljs-string">&quot;transformers_version&quot;</span>: <span class="hljs-string">&quot;4.16.2&quot;</span>, <span class="hljs-string">&quot;vocab_size&quot;</span>: <span class="hljs-number">30522</span> }<!-- HTML_TAG_END --></pre></div> <p><code>DistilBertConfig</code> mostra todos os atributos padrões usados para construir um <code>DistilBertModel</code> base. Todos atributos são customizáveis, o que cria espaço para experimentos. 
Por exemplo, você pode customizar um modelo padrão para:</p> <ul><li>Tentar uma função de ativação diferente com o parâmetro <code>activation</code>.</li> <li>Usar uma taxa de desistência maior para as probabilidades de ‘attention’ com o parâmetro <code>attention_dropout</code>.</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig(activation=<span class="hljs-string">&quot;relu&quot;</span>, attention_dropout=<span class="hljs-number">0.4</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(my_config) DistilBertConfig { <span class="hljs-string">&quot;activation&quot;</span>: <span class="hljs-string">&quot;relu&quot;</span>, <span class="hljs-string">&quot;attention_dropout&quot;</span>: <span 
class="hljs-number">0.4</span>, <span class="hljs-string">&quot;dim&quot;</span>: <span class="hljs-number">768</span>, <span class="hljs-string">&quot;dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;hidden_dim&quot;</span>: <span class="hljs-number">3072</span>, <span class="hljs-string">&quot;initializer_range&quot;</span>: <span class="hljs-number">0.02</span>, <span class="hljs-string">&quot;max_position_embeddings&quot;</span>: <span class="hljs-number">512</span>, <span class="hljs-string">&quot;model_type&quot;</span>: <span class="hljs-string">&quot;distilbert&quot;</span>, <span class="hljs-string">&quot;n_heads&quot;</span>: <span class="hljs-number">12</span>, <span class="hljs-string">&quot;n_layers&quot;</span>: <span class="hljs-number">6</span>, <span class="hljs-string">&quot;pad_token_id&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;qa_dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;seq_classif_dropout&quot;</span>: <span class="hljs-number">0.2</span>, <span class="hljs-string">&quot;sinusoidal_pos_embds&quot;</span>: false, <span class="hljs-string">&quot;transformers_version&quot;</span>: <span class="hljs-string">&quot;4.16.2&quot;</span>, <span class="hljs-string">&quot;vocab_size&quot;</span>: <span class="hljs-number">30522</span> }<!-- HTML_TAG_END --></pre></div> <p>Atributos de um modelo pré-treinado podem ser modificados na função <code>from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, activation=<span class="hljs-string">&quot;relu&quot;</span>, attention_dropout=<span class="hljs-number">0.4</span>)<!-- HTML_TAG_END --></pre></div> <p>Uma vez que você está satisfeito com as configurações do seu modelo, você consegue salvar elas com <code>save_pretrained()</code>. 
Seu arquivo de configurações está salvo como um arquivo JSON no diretório especificado:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>my_config.save_pretrained(save_directory=<span class="hljs-string">&quot;./your_model_save_path&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Para reusar o arquivo de configurações, carregue com <code>from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span class="hljs-string">&quot;./your_model_save_path/my_config.json&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Você pode também salvar seu arquivo de configurações como um dicionário ou até mesmo com a diferença entre as seus atributos de configuração customizados e os atributos de configuração padrões! 
Olhe a documentação <a href="main_classes/configuration">configuration</a> para mais detalhes.</p></div> <h2 class="relative group"><a id="modelo" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#modelo"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Modelo </span></h2> <p>O próximo passo é criar um <a href="main_classes/models">model</a>. O modelo - também vagamente referido como arquitetura - define o que cada camada está fazendo e quais operações estão acontecendo. Atributos como <code>num_hidden_layers</code> das configurações são utilizados para definir a arquitetura. Todo modelo compartilha a classe base <code>PreTrainedModel</code> e alguns métodos em comum como redimensionar o tamanho dos embeddings de entrada e podar as ‘self-attention heads’. 
Além disso, todos os modelos também são subclasses de <a href="https://pytorch.org/docs/stable/generated/torch.nn.Module.html" rel="nofollow"><code>torch.nn.Module</code></a>, <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow"><code>tf.keras.Model</code></a> ou <a href="https://flax.readthedocs.io/en/latest/flax.linen.html#module" rel="nofollow"><code>flax.linen.Module</code></a>. Isso significa que os modelos são compatíveis com cada respectivo uso de framework.</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 
5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Carregar seus atributos de configuração customizados em um modelo:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: 
transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertModel <span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span class="hljs-string">&quot;./your_model_save_path/my_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertModel(my_config)<!-- HTML_TAG_END --></pre></div> <p>Isso cria um modelo com valores aleatórios ao invés de pré-treinar os pesos. Você não irá conseguir usar usar esse modelo para nada útil ainda, até você treinar ele. Treino é um processo caro e demorado. Geralmente é melhor utilizar um modelo pré-treinado para obter melhores resultados mais rápido, enquanto usa apenas uma fração dos recursos necessários para treinar.</p> <p>Criar um modelo pré-treinado com <code>from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 
border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertModel.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Quando você carregar os pesos pré-treinados, a configuração padrão do modelo é automaticamente carregada se o modelo é provido pelo 🤗 Transformers. No entanto, você ainda consegue mudar - alguns ou todos - os atributos padrões de configuração do modelo com os seus próprio atributos, se você preferir: </p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertModel.from_pretrained(<span 
class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, config=my_config)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 
3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Carregar os seus próprios atributos padrões de contiguração no modelo:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDistilBertModel <span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span class="hljs-string">&quot;./your_model_save_path/my_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertModel(my_config)<!-- HTML_TAG_END --></pre></div> <p>Isso cria um modelo com valores aleatórios ao invés de pré-treinar os pesos. Você não irá conseguir usar usar esse modelo para nada útil ainda, até você treinar ele. Treino é um processo caro e demorado. Geralmente é melhor utilizar um modelo pré-treinado para obter melhores resultados mais rápido, enquanto usa apenas uma fração dos recursos necessários para treinar.</p> <p>Criar um modelo pré-treinado com <code>from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; 
border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertModel.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Quando você carregar os pesos pré-treinados, a configuração padrão do modelo é automaticamente carregada se o modelo é provido pelo 🤗 Transformers. No entanto, você ainda consegue mudar - alguns ou todos - os atributos padrões de configuração do modelo com os seus próprio atributos, se você preferir: </p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertModel.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, config=my_config)<!-- HTML_TAG_END 
--></pre></div> </div></div> </div> <h3 class="relative group"><a id="heads-do-modelo" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#heads-do-modelo"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Heads do modelo </span></h3> <p>Neste ponto, você tem um modelo básico do DistilBERT que gera os <em>estados ocultos</em>. Os estados ocultos são passados como entrada para a head do moelo para produzir a saída final. 
🤗 Transformers fornece uma head de modelo diferente para cada tarefa desde que o modelo suporte essa tarefa (por exemplo, você não consegue utilizar o modelo DistilBERT para uma tarefa de ‘sequence-to-sequence’ como tradução).</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 
5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Por exemplo, <code>DistilBertForSequenceClassification</code> é um modelo DistilBERT base com uma head de classificação de sequência. A head de calssificação de sequência é uma camada linear no topo das saídas agrupadas.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Reutilize facilmente esse ponto de parada para outra tarefe mudando para uma head de modelo diferente. Para uma tarefe de responder questões, você usaria a head do modelo <code>DistilBertForQuestionAnswering</code>. A head de responder questões é similar com a de classificação de sequências exceto o fato de que ela é uma camada no topo dos estados das saídas ocultas.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> 
transformers <span class="hljs-keyword">import</span> DistilBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 
5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Por exemplo, <code>TFDistilBertForSequenceClassification</code> é um modelo DistilBERT base com uma head de classificação de sequência. 
A head de calssificação de sequência é uma camada linear no topo das saídas agrupadas.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDistilBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Reutilize facilmente esse ponto de parada para outra tarefe mudando para uma head de modelo diferente. Para uma tarefe de responder questões, você usaria a head do modelo <code>TFDistilBertForQuestionAnswering</code>. 
A head de responder questões é similar com a de classificação de sequências exceto o fato de que ela é uma camada no topo dos estados das saídas ocultas.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDistilBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <h2 class="relative group"><a id="tokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#tokenizer"><span><svg 
class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Tokenizer </span></h2> <p>A útlima classe base que você precisa antes de usar um modelo para dados textuais é a <a href="main_classes/tokenizer">tokenizer</a> para converter textos originais para tensores. Existem dois tipos de tokenizers que você pode usar com 🤗 Transformers:</p> <ul><li><code>PreTrainedTokenizer</code>: uma implementação em Python de um tokenizer.</li> <li><code>PreTrainedTokenizerFast</code>: um tokenizer da nossa biblioteca <a href="https://huggingface.co/docs/tokenizers/python/latest/" rel="nofollow">🤗 Tokenizer</a> baseada em Rust. Esse tipo de tokenizer é significantemente mais rapido - especialmente durante tokenization de codificação - devido a implementação em Rust. 
O tokenizer rápido tambem oferece métodos adicionais como <em>offset mapping</em> que mapeia tokens para suar palavras ou caracteres originais.</li></ul> <p>Os dois tokenizers suporta métodos comuns como os de codificar e decodificar, adicionar novos tokens, e gerenciar tokens especiais.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>Nem todo modelo suporta um ‘fast tokenizer’. De uma olhada aqui <a href="index#supported-frameworks">table</a> pra checar se um modelo suporta ‘fast tokenizer’.</p></div> <p>Se você treinou seu prórpio tokenizer, você pode criar um a partir do seu arquivo <em>vocabulary</em>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> 
Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>my_tokenizer = DistilBertTokenizer(vocab_file=<span class="hljs-string">&quot;my_vocab_file.txt&quot;</span>, do_lower_case=<span class="hljs-literal">False</span>, padding_side=<span class="hljs-string">&quot;left&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>É importante lembrar que o vocabulário de um tokenizer customizado será diferente de um vocabulário gerado pelo tokenizer de um modelo pré treinado. Você precisa usar o vocabulário de um modelo pré treinado se você estiver usando um modelo pré treinado, caso contrário as entradas não farão sentido. Criando um tokenizer com um vocabulário de um modelo pré treinado com a classe <code>DistilBertTokenizer</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>slow_tokenizer = DistilBertTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Criando um ‘fast tokenizer’ com a classe <code>DistilBertTokenizerFast</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>fast_tokenizer = 
DistilBertTokenizerFast.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Pos padrão, <code>AutoTokenizer</code> tentará carregar um ‘fast tokenizer’. Você pode disabilitar esse comportamento colocando <code>use_fast=False</code> no <code>from_pretrained</code>.</p></div> <h2 class="relative group"><a id="extrator-de-features" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#extrator-de-features"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Extrator de features </span></h2> <p>Um extrator de features processa entradas de imagem ou áudio. 
Ele herda da classe base <code>FeatureExtractionMixin</code>, e pode também herdar da classe <code>ImageFeatureExtractionMixin</code> para processamento de features de imagem ou da classe <code>SequenceFeatureExtractor</code> para processamento de entradas de áudio.</p> <p>Dependendo do que você está trabalhando em um audio ou uma tarefa de visão, crie um estrator de features associado com o modelo que você está usando. Por exemplo, crie um <code>ViTFeatureExtractor</code> padrão se você estiver usando <a href="model_doc/vit">ViT</a> para classificação de imagens:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>vit_extractor = 
ViTFeatureExtractor() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(vit_extractor) ViTFeatureExtractor { <span class="hljs-string">&quot;do_normalize&quot;</span>: true, <span class="hljs-string">&quot;do_resize&quot;</span>: true, <span class="hljs-string">&quot;feature_extractor_type&quot;</span>: <span class="hljs-string">&quot;ViTFeatureExtractor&quot;</span>, <span class="hljs-string">&quot;image_mean&quot;</span>: [ <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span> ], <span class="hljs-string">&quot;image_std&quot;</span>: [ <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span> ], <span class="hljs-string">&quot;resample&quot;</span>: <span class="hljs-number">2</span>, <span class="hljs-string">&quot;size&quot;</span>: <span class="hljs-number">224</span> }<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Se você não estiver procurando por nenhuma customização, apenas use o método <code>from_pretrained</code> para carregar parâmetros do modelo de extrator de features padrão.</p></div> <p>Modifique qualquer parâmetro dentre os <code>ViTFeatureExtractor</code> para criar seu extrator de features customizado.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 
32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>my_vit_extractor = ViTFeatureExtractor(resample=<span class="hljs-string">&quot;PIL.Image.BOX&quot;</span>, do_normalize=<span class="hljs-literal">False</span>, image_mean=[<span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(my_vit_extractor) ViTFeatureExtractor { <span class="hljs-string">&quot;do_normalize&quot;</span>: false, <span class="hljs-string">&quot;do_resize&quot;</span>: true, <span class="hljs-string">&quot;feature_extractor_type&quot;</span>: <span class="hljs-string">&quot;ViTFeatureExtractor&quot;</span>, <span class="hljs-string">&quot;image_mean&quot;</span>: [ <span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span> ], <span class="hljs-string">&quot;image_std&quot;</span>: [ <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span> ], <span class="hljs-string">&quot;resample&quot;</span>: 
<span class="hljs-string">&quot;PIL.Image.BOX&quot;</span>, <span class="hljs-string">&quot;size&quot;</span>: <span class="hljs-number">224</span> }<!-- HTML_TAG_END --></pre></div> <p>Para entradas de áutio, você pode criar um <code>Wav2Vec2FeatureExtractor</code> e customizar os parâmetros de uma forma similar:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>w2v2_extractor = Wav2Vec2FeatureExtractor() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(w2v2_extractor) Wav2Vec2FeatureExtractor { <span class="hljs-string">&quot;do_normalize&quot;</span>: true, <span 
class="hljs-string">&quot;feature_extractor_type&quot;</span>: <span class="hljs-string">&quot;Wav2Vec2FeatureExtractor&quot;</span>, <span class="hljs-string">&quot;feature_size&quot;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&quot;padding_side&quot;</span>: <span class="hljs-string">&quot;right&quot;</span>, <span class="hljs-string">&quot;padding_value&quot;</span>: <span class="hljs-number">0.0</span>, <span class="hljs-string">&quot;return_attention_mask&quot;</span>: false, <span class="hljs-string">&quot;sampling_rate&quot;</span>: <span class="hljs-number">16000</span> }<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="processor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#processor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Processor </span></h2> <p>Para modelos que suportam tarefas multimodais, 🤗 Transformers oferece uma classe processadora que convenientemente cobre um extrator de features e tokenizer dentro de um único objeto. Por exemplo, vamos usar o <code>Wav2Vec2Processor</code> para uma tarefa de reconhecimento de fala automática (ASR). 
ASR transcreve áudio para texto, então você irá precisar de um extrator de um features e um tokenizer.</p> <p>Crie um extrator de features para lidar com as entradas de áudio.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor(padding_value=<span class="hljs-number">1.0</span>, do_normalize=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Crie um tokenizer para lidar com a entrada de textos:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer 
focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2CTCTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = Wav2Vec2CTCTokenizer(vocab_file=<span class="hljs-string">&quot;my_vocab_file.txt&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Combine o extrator de features e o tokenizer no <code>Wav2Vec2Processor</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)<!-- HTML_TAG_END --></pre></div> <p>Com duas classes básicas - configuração e modelo - e um preprocessamento de classe adicional (tokenizer, extrator de features, ou processador), você pode criar qualquer modelo que suportado por 🤗 Transformers. Qualquer uma dessas classes base são configuráveis, te permitindo usar os atributos específicos que você queira. 
Você pode facilmente preparar um modelo para treinamento ou modificar um modelo pré-treinado com poucas mudanças.</p> <script type="module" data-hydrate="1h2erfv"> import { start } from "/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1h2erfv"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/pt","assets":"/docs/transformers/pr_18789/pt"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/pt/_app/pages/create_a_model.mdx-hf-doc-builder.js") ], params: {} } }); </script>
465
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/start-hf-doc-builder.js
var de=Object.defineProperty,_e=Object.defineProperties;var pe=Object.getOwnPropertyDescriptors;var B=Object.getOwnPropertySymbols;var ee=Object.prototype.hasOwnProperty,te=Object.prototype.propertyIsEnumerable;var Z=(a,e,t)=>e in a?de(a,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):a[e]=t,b=(a,e)=>{for(var t in e||(e={}))ee.call(e,t)&&Z(a,t,e[t]);if(B)for(var t of B(e))te.call(e,t)&&Z(a,t,e[t]);return a},W=(a,e)=>_e(a,pe(e));var se=(a,e)=>{var t={};for(var r in a)ee.call(a,r)&&e.indexOf(r)<0&&(t[r]=a[r]);if(a!=null&&B)for(var r of B(a))e.indexOf(r)<0&&te.call(a,r)&&(t[r]=a[r]);return t};import{S as ge,i as me,s as we,e as be,c as ve,a as ye,d as R,b as G,f as U,g as O,t as ke,h as $e,j as Ee,k as Re,l as v,m as Le,n as I,o as y,p as D,q as k,r as Se,u as Oe,v as F,w as A,x as C,y as P,z as j,A as q,B as T,C as z,D as Y,E as re}from"./chunks/vendor-hf-doc-builder.js";import{s as Ae,b as Pe}from"./chunks/paths-hf-doc-builder.js";function Te(a){let e,t,r;const o=[a[1]||{}];var i=a[0][0];function l(s){let n={};for(let c=0;c<o.length;c+=1)n=z(n,o[c]);return{props:n}}return i&&(e=new i(l())),{c(){e&&A(e.$$.fragment),t=v()},l(s){e&&C(e.$$.fragment,s),t=v()},m(s,n){e&&P(e,s,n),O(s,t,n),r=!0},p(s,n){const c=n&2?j(o,[q(s[1]||{})]):{};if(i!==(i=s[0][0])){if(e){I();const f=e;y(f.$$.fragment,1,0,()=>{T(f,1)}),D()}i?(e=new i(l()),A(e.$$.fragment),k(e.$$.fragment,1),P(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&k(e.$$.fragment,s),r=!0)},o(s){e&&y(e.$$.fragment,s),r=!1},d(s){s&&R(t),e&&T(e,s)}}}function xe(a){let e,t,r;const o=[a[1]||{}];var i=a[0][0];function l(s){let n={$$slots:{default:[De]},$$scope:{ctx:s}};for(let c=0;c<o.length;c+=1)n=z(n,o[c]);return{props:n}}return i&&(e=new i(l(a))),{c(){e&&A(e.$$.fragment),t=v()},l(s){e&&C(e.$$.fragment,s),t=v()},m(s,n){e&&P(e,s,n),O(s,t,n),r=!0},p(s,n){const c=n&2?j(o,[q(s[1]||{})]):{};if(n&525&&(c.$$scope={dirty:n,ctx:s}),i!==(i=s[0][0])){if(e){I();const f=e;y(f.$$.fragment,1,0,()=>{T(f,1)}),D()}i?(e=new 
i(l(s)),A(e.$$.fragment),k(e.$$.fragment,1),P(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&k(e.$$.fragment,s),r=!0)},o(s){e&&y(e.$$.fragment,s),r=!1},d(s){s&&R(t),e&&T(e,s)}}}function Ue(a){let e,t,r;const o=[a[2]||{}];var i=a[0][1];function l(s){let n={};for(let c=0;c<o.length;c+=1)n=z(n,o[c]);return{props:n}}return i&&(e=new i(l())),{c(){e&&A(e.$$.fragment),t=v()},l(s){e&&C(e.$$.fragment,s),t=v()},m(s,n){e&&P(e,s,n),O(s,t,n),r=!0},p(s,n){const c=n&4?j(o,[q(s[2]||{})]):{};if(i!==(i=s[0][1])){if(e){I();const f=e;y(f.$$.fragment,1,0,()=>{T(f,1)}),D()}i?(e=new i(l()),A(e.$$.fragment),k(e.$$.fragment,1),P(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&k(e.$$.fragment,s),r=!0)},o(s){e&&y(e.$$.fragment,s),r=!1},d(s){s&&R(t),e&&T(e,s)}}}function Ve(a){let e,t,r;const o=[a[2]||{}];var i=a[0][1];function l(s){let n={$$slots:{default:[Ie]},$$scope:{ctx:s}};for(let c=0;c<o.length;c+=1)n=z(n,o[c]);return{props:n}}return i&&(e=new i(l(a))),{c(){e&&A(e.$$.fragment),t=v()},l(s){e&&C(e.$$.fragment,s),t=v()},m(s,n){e&&P(e,s,n),O(s,t,n),r=!0},p(s,n){const c=n&4?j(o,[q(s[2]||{})]):{};if(n&521&&(c.$$scope={dirty:n,ctx:s}),i!==(i=s[0][1])){if(e){I();const f=e;y(f.$$.fragment,1,0,()=>{T(f,1)}),D()}i?(e=new i(l(s)),A(e.$$.fragment),k(e.$$.fragment,1),P(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&k(e.$$.fragment,s),r=!0)},o(s){e&&y(e.$$.fragment,s),r=!1},d(s){s&&R(t),e&&T(e,s)}}}function Ie(a){let e,t,r;const o=[a[3]||{}];var i=a[0][2];function l(s){let n={};for(let c=0;c<o.length;c+=1)n=z(n,o[c]);return{props:n}}return i&&(e=new i(l())),{c(){e&&A(e.$$.fragment),t=v()},l(s){e&&C(e.$$.fragment,s),t=v()},m(s,n){e&&P(e,s,n),O(s,t,n),r=!0},p(s,n){const c=n&8?j(o,[q(s[3]||{})]):{};if(i!==(i=s[0][2])){if(e){I();const f=e;y(f.$$.fragment,1,0,()=>{T(f,1)}),D()}i?(e=new i(l()),A(e.$$.fragment),k(e.$$.fragment,1),P(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&k(e.$$.fragment,s),r=!0)},o(s){e&&y(e.$$.fragment,s),r=!1},d(s){s&&R(t),e&&T(e,s)}}}function 
De(a){let e,t,r,o;const i=[Ve,Ue],l=[];function s(n,c){return n[0][2]?0:1}return e=s(a),t=l[e]=i[e](a),{c(){t.c(),r=v()},l(n){t.l(n),r=v()},m(n,c){l[e].m(n,c),O(n,r,c),o=!0},p(n,c){let f=e;e=s(n),e===f?l[e].p(n,c):(I(),y(l[f],1,1,()=>{l[f]=null}),D(),t=l[e],t?t.p(n,c):(t=l[e]=i[e](n),t.c()),k(t,1),t.m(r.parentNode,r))},i(n){o||(k(t),o=!0)},o(n){y(t),o=!1},d(n){l[e].d(n),n&&R(r)}}}function ie(a){let e,t=a[5]&&ne(a);return{c(){e=be("div"),t&&t.c(),this.h()},l(r){e=ve(r,"DIV",{id:!0,"aria-live":!0,"aria-atomic":!0,style:!0});var o=ye(e);t&&t.l(o),o.forEach(R),this.h()},h(){G(e,"id","svelte-announcer"),G(e,"aria-live","assertive"),G(e,"aria-atomic","true"),U(e,"position","absolute"),U(e,"left","0"),U(e,"top","0"),U(e,"clip","rect(0 0 0 0)"),U(e,"clip-path","inset(50%)"),U(e,"overflow","hidden"),U(e,"white-space","nowrap"),U(e,"width","1px"),U(e,"height","1px")},m(r,o){O(r,e,o),t&&t.m(e,null)},p(r,o){r[5]?t?t.p(r,o):(t=ne(r),t.c(),t.m(e,null)):t&&(t.d(1),t=null)},d(r){r&&R(e),t&&t.d()}}}function ne(a){let e;return{c(){e=ke(a[6])},l(t){e=$e(t,a[6])},m(t,r){O(t,e,r)},p(t,r){r&64&&Ee(e,t[6])},d(t){t&&R(e)}}}function Ne(a){let e,t,r,o,i;const l=[xe,Te],s=[];function n(f,h){return f[0][1]?0:1}e=n(a),t=s[e]=l[e](a);let c=a[4]&&ie(a);return{c(){t.c(),r=Re(),c&&c.c(),o=v()},l(f){t.l(f),r=Le(f),c&&c.l(f),o=v()},m(f,h){s[e].m(f,h),O(f,r,h),c&&c.m(f,h),O(f,o,h),i=!0},p(f,[h]){let u=e;e=n(f),e===u?s[e].p(f,h):(I(),y(s[u],1,1,()=>{s[u]=null}),D(),t=s[e],t?t.p(f,h):(t=s[e]=l[e](f),t.c()),k(t,1),t.m(r.parentNode,r)),f[4]?c?c.p(f,h):(c=ie(f),c.c(),c.m(o.parentNode,o)):c&&(c.d(1),c=null)},i(f){i||(k(t),i=!0)},o(f){y(t),i=!1},d(f){s[e].d(f),f&&R(r),c&&c.d(f),f&&R(o)}}}function Ce(a,e,t){let{stores:r}=e,{page:o}=e,{components:i}=e,{props_0:l=null}=e,{props_1:s=null}=e,{props_2:n=null}=e;Se("__svelte__",r),Oe(r.page.notify);let c=!1,f=!1,h=null;return F(()=>{const u=r.page.subscribe(()=>{c&&(t(5,f=!0),t(6,h=document.title||"untitled page"))});return t(4,c=!0),u}),a.$$set=u=>{"stores"in 
u&&t(7,r=u.stores),"page"in u&&t(8,o=u.page),"components"in u&&t(0,i=u.components),"props_0"in u&&t(1,l=u.props_0),"props_1"in u&&t(2,s=u.props_1),"props_2"in u&&t(3,n=u.props_2)},a.$$.update=()=>{a.$$.dirty&384&&r.page.set(o)},[i,l,s,n,c,f,h,r,o]}class je extends ge{constructor(e){super();me(this,e,Ce,Ne,we,{stores:7,page:8,components:0,props_0:1,props_1:2,props_2:3})}}const qe="modulepreload",ae={},ze="/docs/transformers/pr_18789/pt/_app/",$=function(e,t){return!t||t.length===0?e():Promise.all(t.map(r=>{if(r=`${ze}${r}`,r in ae)return;ae[r]=!0;const o=r.endsWith(".css"),i=o?'[rel="stylesheet"]':"";if(document.querySelector(`link[href="${r}"]${i}`))return;const l=document.createElement("link");if(l.rel=o?"stylesheet":qe,o||(l.as="script",l.crossOrigin=""),l.href=r,document.head.appendChild(l),o)return new Promise((s,n)=>{l.addEventListener("load",s),l.addEventListener("error",n)})})).then(()=>e())},d=[()=>$(()=>import("./pages/__layout.svelte-hf-doc-builder.js"),["pages/__layout.svelte-hf-doc-builder.js","assets/pages/__layout.svelte-hf-doc-builder.css","chunks/vendor-hf-doc-builder.js","chunks/paths-hf-doc-builder.js"]),()=>$(()=>import("./error.svelte-hf-doc-builder.js"),["error.svelte-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js"]),()=>$(()=>import("./pages/index.mdx-hf-doc-builder.js"),["pages/index.mdx-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js","chunks/IconCopyLink-hf-doc-builder.js"]),()=>$(()=>import("./pages/pipeline_tutorial.mdx-hf-doc-builder.js"),["pages/pipeline_tutorial.mdx-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js","chunks/Tip-hf-doc-builder.js","chunks/IconCopyLink-hf-doc-builder.js","chunks/CodeBlock-hf-doc-builder.js"]),()=>$(()=>import("./pages/fast_tokenizers.mdx-hf-doc-builder.js"),["pages/fast_tokenizers.mdx-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js","chunks/IconCopyLink-hf-doc-builder.js","chunks/CodeBlock-hf-doc-builder.js"]),()=>$(()=>import("./pages/create_a_model.mdx-hf-doc-builder.js"),["pages/create_a
_model.mdx-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js","chunks/Tip-hf-doc-builder.js","chunks/IconCopyLink-hf-doc-builder.js","chunks/CodeBlock-hf-doc-builder.js","chunks/Markdown-hf-doc-builder.js"]),()=>$(()=>import("./pages/installation.mdx-hf-doc-builder.js"),["pages/installation.mdx-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js","chunks/Tip-hf-doc-builder.js","chunks/IconCopyLink-hf-doc-builder.js","chunks/CodeBlock-hf-doc-builder.js"]),()=>$(()=>import("./pages/multilingual.mdx-hf-doc-builder.js"),["pages/multilingual.mdx-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js","chunks/IconCopyLink-hf-doc-builder.js","chunks/CodeBlock-hf-doc-builder.js","chunks/DocNotebookDropdown-hf-doc-builder.js"]),()=>$(()=>import("./pages/accelerate.mdx-hf-doc-builder.js"),["pages/accelerate.mdx-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js","chunks/IconCopyLink-hf-doc-builder.js","chunks/CodeBlock-hf-doc-builder.js"]),()=>$(()=>import("./pages/quicktour.mdx-hf-doc-builder.js"),["pages/quicktour.mdx-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js","chunks/Tip-hf-doc-builder.js","chunks/Youtube-hf-doc-builder.js","chunks/IconCopyLink-hf-doc-builder.js","chunks/CodeBlock-hf-doc-builder.js","chunks/DocNotebookDropdown-hf-doc-builder.js","chunks/Markdown-hf-doc-builder.js"]),()=>$(()=>import("./pages/training.mdx-hf-doc-builder.js"),["pages/training.mdx-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js","chunks/Tip-hf-doc-builder.js","chunks/Youtube-hf-doc-builder.js","chunks/IconCopyLink-hf-doc-builder.js","chunks/CodeBlock-hf-doc-builder.js","chunks/DocNotebookDropdown-hf-doc-builder.js"]),()=>$(()=>import("./pages/tasks/sequence_classification.mdx-hf-doc-builder.js"),["pages/tasks/sequence_classification.mdx-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js","chunks/Tip-hf-doc-builder.js","chunks/Youtube-hf-doc-builder.js","chunks/IconCopyLink-hf-doc-builder.js","chunks/CodeBlock-hf-doc-builder.js","chunks/Markdown-hf-doc-builder.js"]),()=>$(()=>im
port("./pages/tasks/token_classification.mdx-hf-doc-builder.js"),["pages/tasks/token_classification.mdx-hf-doc-builder.js","chunks/vendor-hf-doc-builder.js","chunks/Tip-hf-doc-builder.js","chunks/Youtube-hf-doc-builder.js","chunks/IconCopyLink-hf-doc-builder.js","chunks/CodeBlock-hf-doc-builder.js","chunks/Markdown-hf-doc-builder.js"])],Je=[[/^\/$/,[d[0],d[2]],[d[1]]],[/^\/pipeline_tutorial\/?$/,[d[0],d[3]],[d[1]]],[/^\/fast_tokenizers\/?$/,[d[0],d[4]],[d[1]]],[/^\/create_a_model\/?$/,[d[0],d[5]],[d[1]]],[/^\/installation\/?$/,[d[0],d[6]],[d[1]]],[/^\/multilingual\/?$/,[d[0],d[7]],[d[1]]],[/^\/accelerate\/?$/,[d[0],d[8]],[d[1]]],[/^\/quicktour\/?$/,[d[0],d[9]],[d[1]]],[/^\/training\/?$/,[d[0],d[10]],[d[1]]],[/^\/tasks\/sequence_classification\/?$/,[d[0],d[11]],[d[1]]],[/^\/tasks\/token_classification\/?$/,[d[0],d[12]],[d[1]]]],Ke=[d[0](),d[1]()];function Be(a){let e=a.baseURI;if(!e){const t=a.getElementsByTagName("base");e=t.length?t[0].href:a.URL}return e}function We(a,e){return a==="/"||e==="ignore"?a:e==="never"?a.endsWith("/")?a.slice(0,-1):a:e==="always"&&/\/[^./]+$/.test(a)?a+"/":a}const he="sveltekit:scroll";let N={};try{N=JSON.parse(sessionStorage[he])}catch{}function M(a){N[a]=X()}function X(){return{x:pageXOffset,y:pageYOffset}}function oe(a){return a.composedPath().find(t=>t instanceof Node&&t.nodeName.toUpperCase()==="A")}function le(a){return a instanceof SVGAElement?new URL(a.href.baseVal,document.baseURI):new URL(a.href)}class Ye{constructor({base:e,routes:t,trailing_slash:r,renderer:o}){var l,s;this.base=e,this.routes=t,this.trailing_slash=r,this.navigating=0,this.renderer=o,o.router=this,this.enabled=!0,this.initialized=!1,document.body.setAttribute("tabindex","-1"),this.current_history_index=(s=(l=history.state)==null?void 0:l["sveltekit:index"])!=null?s:0,this.current_history_index===0&&history.replaceState(W(b({},history.state),{"sveltekit:index":0}),"",location.href);const 
i=N[this.current_history_index];i&&scrollTo(i.x,i.y),this.hash_navigating=!1,this.callbacks={before_navigate:[],after_navigate:[]}}init_listeners(){history.scrollRestoration="manual",addEventListener("beforeunload",o=>{let i=!1;const l={from:this.renderer.current.url,to:null,cancel:()=>i=!0};this.callbacks.before_navigate.forEach(s=>s(l)),i?(o.preventDefault(),o.returnValue=""):history.scrollRestoration="auto"}),addEventListener("visibilitychange",()=>{if(document.visibilityState==="hidden"){M(this.current_history_index);try{sessionStorage[he]=JSON.stringify(N)}catch{}}});const e=o=>{const i=oe(o);i&&i.href&&i.hasAttribute("sveltekit:prefetch")&&this.prefetch(le(i))};let t;const r=o=>{clearTimeout(t),t=setTimeout(()=>{var i;(i=o.target)==null||i.dispatchEvent(new CustomEvent("sveltekit:trigger_prefetch",{bubbles:!0}))},20)};addEventListener("touchstart",e),addEventListener("mousemove",r),addEventListener("sveltekit:trigger_prefetch",e),addEventListener("click",o=>{if(!this.enabled||o.button||o.which!==1||o.metaKey||o.ctrlKey||o.shiftKey||o.altKey||o.defaultPrevented)return;const i=oe(o);if(!i||!i.href)return;const l=i instanceof SVGAElement,s=le(i);if(s.toString()===location.href){location.hash||o.preventDefault();return}if(!l&&s.origin==="null")return;const c=(i.getAttribute("rel")||"").split(/\s+/);if(i.hasAttribute("download")||c&&c.includes("external")||(l?i.target.baseVal:i.target))return;const[f,h]=s.href.split("#");if(h!==void 0&&f===location.href.split("#")[0]){this.hash_navigating=!0,M(this.current_history_index),this.renderer.update_page_store(new URL(s.href));return}this._navigate({url:s,scroll:i.hasAttribute("sveltekit:noscroll")?X():null,keepfocus:!1,chain:[],details:{state:{},replaceState:!1},accepted:()=>o.preventDefault(),blocked:()=>o.preventDefault()})}),addEventListener("popstate",o=>{if(o.state&&this.enabled){if(o.state["sveltekit:index"]===this.current_history_index)return;this._navigate({url:new 
URL(location.href),scroll:N[o.state["sveltekit:index"]],keepfocus:!1,chain:[],details:null,accepted:()=>{this.current_history_index=o.state["sveltekit:index"]},blocked:()=>{const i=this.current_history_index-o.state["sveltekit:index"];history.go(i)}})}}),addEventListener("hashchange",()=>{this.hash_navigating&&(this.hash_navigating=!1,history.replaceState(W(b({},history.state),{"sveltekit:index":++this.current_history_index}),"",location.href))}),this.initialized=!0}owns(e){return e.origin===location.origin&&e.pathname.startsWith(this.base)}parse(e){if(this.owns(e)){const t=decodeURI(e.pathname.slice(this.base.length)||"/");return{id:e.pathname+e.search,routes:this.routes.filter(([r])=>r.test(t)),url:e,path:t,initial:!this.initialized}}}async goto(e,{noscroll:t=!1,replaceState:r=!1,keepfocus:o=!1,state:i={}}={},l){const s=new URL(e,Be(document));return this.enabled?this._navigate({url:s,scroll:t?X():null,keepfocus:o,chain:l,details:{state:i,replaceState:r},accepted:()=>{},blocked:()=>{}}):(location.href=s.href,new Promise(()=>{}))}enable(){this.enabled=!0}disable(){this.enabled=!1}async prefetch(e){const t=this.parse(e);if(!t)throw new Error("Attempted to prefetch a URL that does not belong to this app");return this.renderer.load(t)}after_navigate(e){F(()=>(this.callbacks.after_navigate.push(e),()=>{const t=this.callbacks.after_navigate.indexOf(e);this.callbacks.after_navigate.splice(t,1)}))}before_navigate(e){F(()=>(this.callbacks.before_navigate.push(e),()=>{const t=this.callbacks.before_navigate.indexOf(e);this.callbacks.before_navigate.splice(t,1)}))}async _navigate({url:e,scroll:t,keepfocus:r,chain:o,details:i,accepted:l,blocked:s}){const n=this.renderer.current.url;let c=!1;const f={from:n,to:e,cancel:()=>c=!0};if(this.callbacks.before_navigate.forEach(p=>p(f)),c){s();return}const h=this.parse(e);if(!h)return location.href=e.href,new Promise(()=>{});M(this.current_history_index),l(),this.navigating++;const u=We(e.pathname,this.trailing_slash);h.url=new 
URL(e.origin+u+e.search+e.hash);const _=this.navigating_token={};if(await this.renderer.handle_navigation(h,o,!1,{scroll:t,keepfocus:r}),this.navigating--,this.navigating_token===_){if(!this.navigating){const p={from:n,to:e};this.callbacks.after_navigate.forEach(x=>x(p))}if(i){const p=i.replaceState?0:1;i.state["sveltekit:index"]=this.current_history_index+=p,history[i.replaceState?"replaceState":"pushState"](i.state,"",h.url)}}}}function ce(a){return a instanceof Error||a&&a.name&&a.message?a:new Error(JSON.stringify(a))}function Ge(a){let e=5381,t=a.length;if(typeof a=="string")for(;t;)e=e*33^a.charCodeAt(--t);else for(;t;)e=e*33^a[--t];return(e>>>0).toString(36)}function fe(a){const e=a.status&&a.status>=400&&a.status<=599&&!a.redirect;if(a.error||e){const t=a.status;if(!a.error&&e)return{status:t||500,error:new Error};const r=typeof a.error=="string"?new Error(a.error):a.error;return r instanceof Error?!t||t<400||t>599?(console.warn('"error" returned from load() without a valid status code \u2014 defaulting to 500'),{status:500,error:r}):{status:t,error:r}:{status:500,error:new Error(`"error" property returned from load() must be a string or instance of Error, received type "${typeof r}"`)}}if(a.redirect){if(!a.status||Math.floor(a.status/100)!==3)return{status:500,error:new Error('"redirect" property returned from load() must be accompanied by a 3xx status code')};if(typeof a.redirect!="string")return{status:500,error:new Error('"redirect" property returned from load() must be a string')}}if(a.context)throw new Error('You are returning "context" from a load function. 
"context" was renamed to "stuff", please adjust your code accordingly.');return a}function ue(a){const e=Y(a);let t=!0;function r(){t=!0,e.update(l=>l)}function o(l){t=!1,e.set(l)}function i(l){let s;return e.subscribe(n=>{(s===void 0||t&&n!==s)&&l(s=n)})}return{notify:r,set:o,subscribe:i}}function Me(){const{set:a,subscribe:e}=Y(!1),t="1665401523930";let r;async function o(){clearTimeout(r);const l=await fetch(`${Pe}/_app/version.json`,{headers:{pragma:"no-cache","cache-control":"no-cache"}});if(l.ok){const{version:s}=await l.json(),n=s!==t;return n&&(a(!0),clearTimeout(r)),n}else throw new Error(`Version check failed: ${l.status}`)}return{subscribe:e,check:o}}function Fe(a,e){const t=typeof a=="string"?a:a.url;let r=`script[data-type="svelte-data"][data-url=${JSON.stringify(t)}]`;e&&typeof e.body=="string"&&(r+=`[data-body="${Ge(e.body)}"]`);const o=document.querySelector(r);if(o&&o.textContent){const i=JSON.parse(o.textContent),{body:l}=i,s=se(i,["body"]);return Promise.resolve(new Response(l,s))}return fetch(a,e)}class Xe{constructor({Root:e,fallback:t,target:r,session:o}){this.Root=e,this.fallback=t,this.router,this.target=r,this.started=!1,this.session_id=1,this.invalid=new Set,this.invalidating=null,this.autoscroll=!0,this.updating=!1,this.current={url:null,session_id:0,branch:[]},this.cache=new Map,this.loading={id:null,promise:null},this.stores={url:ue({}),page:ue({}),navigating:Y(null),session:Y(o),updated:Me()},this.$session=null,this.root=null;let i=!1;this.stores.session.subscribe(async l=>{if(this.$session=l,!i||!this.router)return;this.session_id+=1;const s=this.router.parse(new URL(location.href));s&&this.update(s,[],!0)}),i=!0}disable_scroll_handling(){(this.updating||!this.started)&&(this.autoscroll=!1)}async start({status:e,error:t,nodes:r,params:o}){const i=new URL(location.href),l=[];let s={},n,c;try{for(let f=0;f<r.length;f+=1){const h=f===r.length-1;let u;if(h){const 
p=document.querySelector('[data-type="svelte-props"]');p&&(u=JSON.parse(p.textContent))}const _=await this._load_node({module:await r[f],url:i,params:o,stuff:s,status:h?e:void 0,error:h?t:void 0,props:u});if(u&&(_.uses.dependencies.add(i.href),_.uses.url=!0),l.push(_),_&&_.loaded)if(_.loaded.error){if(t)throw _.loaded.error;c={status:_.loaded.status,error:_.loaded.error,url:i}}else _.loaded.stuff&&(s=b(b({},s),_.loaded.stuff))}n=c?await this._load_error(c):await this._get_navigation_result_from_branch({url:i,params:o,stuff:s,branch:l,status:e,error:t})}catch(f){if(t)throw f;n=await this._load_error({status:500,error:ce(f),url:i})}if(n.redirect){location.href=new URL(n.redirect,location.href).href;return}this._init(n)}async handle_navigation(e,t,r,o){this.started&&this.stores.navigating.set({from:this.current.url,to:e.url}),await this.update(e,t,r,o)}async update(e,t,r,o){var n,c,f;const i=this.token={};let l=await this._get_navigation_result(e,r);if(!l){location.href=e.url.href;return}if(i!==this.token)return;if(this.invalid.clear(),l.redirect)if(t.length>10||t.includes(e.url.pathname))l=await this._load_error({status:500,error:new Error("Redirect loop"),url:e.url});else{this.router?this.router.goto(new URL(l.redirect,e.url).href,{},[...t,e.url.pathname]):location.href=new URL(l.redirect,location.href).href;return}else if(((c=(n=l.props)==null?void 0:n.page)==null?void 0:c.status)>=400&&await this.stores.updated.check()){location.href=e.url.href;return}if(this.updating=!0,this.started?(this.current=l.state,this.root.$set(l.props),this.stores.navigating.set(null)):this._init(l),o){const{scroll:h,keepfocus:u}=o;if(u||((f=getSelection())==null||f.removeAllRanges(),document.body.focus()),await re(),this.autoscroll){const _=e.url.hash&&document.getElementById(e.url.hash.slice(1));h?scrollTo(h.x,h.y):_?_.scrollIntoView():scrollTo(0,0)}}else await 
re();if(this.loading.promise=null,this.loading.id=null,this.autoscroll=!0,this.updating=!1,l.props.page&&(this.page=l.props.page),!this.router)return;const s=l.state.branch[l.state.branch.length-1];s&&s.module.router===!1?this.router.disable():this.router.enable()}load(e){return this.loading.promise=this._get_navigation_result(e,!1),this.loading.id=e.id,this.loading.promise}invalidate(e){return this.invalid.add(e),this.invalidating||(this.invalidating=Promise.resolve().then(async()=>{const t=this.router&&this.router.parse(new URL(location.href));t&&await this.update(t,[],!0),this.invalidating=null})),this.invalidating}update_page_store(e){this.stores.page.set(W(b({},this.page),{url:e})),this.stores.page.notify()}_init(e){this.current=e.state;const t=document.querySelector("style[data-svelte]");if(t&&t.remove(),this.page=e.props.page,this.root=new this.Root({target:this.target,props:b({stores:this.stores},e.props),hydrate:!0}),this.started=!0,this.router){const r={from:null,to:new URL(location.href)};this.router.callbacks.after_navigate.forEach(o=>o(r))}}async _get_navigation_result(e,t){if(this.loading.id===e.id&&this.loading.promise)return this.loading.promise;for(let r=0;r<e.routes.length;r+=1){const o=e.routes[r];let i=r+1;for(;i<e.routes.length;){const s=e.routes[i];if(s[0].toString()===o[0].toString())s[1].forEach(n=>n()),i+=1;else break}const l=await this._load({route:o,info:e},t);if(l)return l}if(e.initial)return await this._load_error({status:404,error:new Error(`Not found: ${e.url.pathname}`),url:e.url})}async _get_navigation_result_from_branch({url:e,params:t,stuff:r,branch:o,status:i,error:l}){const s=o.filter(Boolean),n=s.find(u=>u.loaded&&u.loaded.redirect),c={redirect:n&&n.loaded?n.loaded.redirect:void 0,state:{url:e,params:t,branch:o,session_id:this.session_id},props:{components:s.map(u=>u.module.default)}};for(let u=0;u<s.length;u+=1){const _=s[u].loaded;c.props[`props_${u}`]=_?await 
_.props:null}if(!this.current.url||e.href!==this.current.url.href){c.props.page={url:e,params:t,status:i,error:l,stuff:r};const u=(_,p)=>{Object.defineProperty(c.props.page,_,{get:()=>{throw new Error(`$page.${_} has been replaced by $page.url.${p}`)}})};u("origin","origin"),u("path","pathname"),u("query","searchParams")}const f=s[s.length-1],h=f.loaded&&f.loaded.maxage;if(h){const u=e.pathname+e.search;let _=!1;const p=()=>{this.cache.get(u)===c&&this.cache.delete(u),L(),clearTimeout(x)},x=setTimeout(p,h*1e3),L=this.stores.session.subscribe(()=>{_&&p()});_=!0,this.cache.set(u,c)}return c}async _load_node({status:e,error:t,module:r,url:o,params:i,stuff:l,props:s}){const n={module:r,uses:{params:new Set,url:!1,session:!1,stuff:!1,dependencies:new Set},loaded:null,stuff:l};s&&n.uses.dependencies.add(o.href);const c={};for(const h in i)Object.defineProperty(c,h,{get(){return n.uses.params.add(h),i[h]},enumerable:!0});const f=this.$session;if(r.load){const{started:h}=this,u={params:c,props:s||{},get url(){return n.uses.url=!0,o},get session(){return n.uses.session=!0,f},get stuff(){return n.uses.stuff=!0,b({},l)},fetch(p,x){const L=typeof p=="string"?p:p.url,{href:S}=new URL(L,o);return n.uses.dependencies.add(S),h?fetch(p,x):Fe(p,x)}};t&&(u.status=e,u.error=t);const _=await r.load.call(null,u);if(!_)throw new Error("load function must return a value");n.loaded=fe(_),n.loaded.stuff&&(n.stuff=n.loaded.stuff)}else s&&(n.loaded=fe({props:s}));return n}async _load({route:e,info:{url:t,path:r}},o){const i=t.pathname+t.search;if(!o){const g=this.cache.get(i);if(g)return g}const[l,s,n,c,f]=e,h=c?c(l.exec(r)):{},u=this.current.url&&{url:i!==this.current.url.pathname+this.current.url.search,params:Object.keys(h).filter(g=>this.current.params[g]!==h[g]),session:this.session_id!==this.current.session_id};let _=[],p={},x=!1,L=200,S;s.forEach(g=>g());e:for(let g=0;g<s.length;g+=1){let m;try{if(!s[g])continue;const w=await 
s[g](),E=this.current.branch[g];if(!E||w!==E.module||u.url&&E.uses.url||u.params.some(V=>E.uses.params.has(V))||u.session&&E.uses.session||Array.from(E.uses.dependencies).some(V=>this.invalid.has(V))||x&&E.uses.stuff){let V={};const H=f&&g===s.length-1;if(H){const K=await fetch(`${t.pathname}${t.pathname.endsWith("/")?"":"/"}__data.json${t.search}`,{headers:{"x-sveltekit-load":"true"}});if(K.ok){const Q=K.headers.get("x-sveltekit-location");if(Q)return{redirect:Q,props:{},state:this.current};V=await K.json()}else L=K.status,S=new Error("Failed to load data")}if(S||(m=await this._load_node({module:w,url:t,params:h,props:V,stuff:p})),m&&(H&&(m.uses.url=!0),m.loaded)){if(m.loaded.fallthrough)return;if(m.loaded.error&&(L=m.loaded.status,S=m.loaded.error),m.loaded.redirect)return{redirect:m.loaded.redirect,props:{},state:this.current};m.loaded.stuff&&(x=!0)}}else m=E}catch(w){L=500,S=ce(w)}if(S){for(;g--;)if(n[g]){let w,E,J=g;for(;!(E=_[J]);)J-=1;try{if(w=await this._load_node({status:L,error:S,module:await n[g](),url:t,params:h,stuff:E.stuff}),w&&w.loaded&&w.loaded.error)continue;w&&w.loaded&&w.loaded.stuff&&(p=b(b({},p),w.loaded.stuff)),_=_.slice(0,J+1).concat(w);break e}catch{continue}}return await this._load_error({status:L,error:S,url:t})}else m&&m.loaded&&m.loaded.stuff&&(p=b(b({},p),m.loaded.stuff)),_.push(m)}return await this._get_navigation_result_from_branch({url:t,params:h,stuff:p,branch:_,status:L,error:S})}async _load_error({status:e,error:t,url:r}){var c,f;const o={},i=await this._load_node({module:await this.fallback[0],url:r,params:o,stuff:{}}),l=await this._load_node({status:e,error:t,module:await this.fallback[1],url:r,params:o,stuff:i&&i.loaded&&i.loaded.stuff||{}}),s=[i,l],n=b(b({},(c=i==null?void 0:i.loaded)==null?void 0:c.stuff),(f=l==null?void 0:l.loaded)==null?void 0:f.stuff);return await this._get_navigation_result_from_branch({url:r,params:o,stuff:n,branch:s,status:e,error:t})}}async function 
et({paths:a,target:e,session:t,route:r,spa:o,trailing_slash:i,hydrate:l}){const s=new Xe({Root:je,fallback:Ke,target:e,session:t}),n=r?new Ye({base:a.base,routes:Je,trailing_slash:i,renderer:s}):null;Ae(a),l&&await s.start(l),n&&(o&&n.goto(location.href,{replaceState:!0},[]),n.init_listeners()),dispatchEvent(new CustomEvent("sveltekit:start"))}export{et as start};
466
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/version.json
{"version":"1665401523930"}
467
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/manifest.json
{ ".svelte-kit/runtime/client/start.js": { "file": "start-hf-doc-builder.js", "src": ".svelte-kit/runtime/client/start.js", "isEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_paths-hf-doc-builder.js" ], "dynamicImports": [ "src/routes/__layout.svelte", ".svelte-kit/runtime/components/error.svelte", "src/routes/index.mdx", "src/routes/pipeline_tutorial.mdx", "src/routes/fast_tokenizers.mdx", "src/routes/create_a_model.mdx", "src/routes/installation.mdx", "src/routes/multilingual.mdx", "src/routes/accelerate.mdx", "src/routes/quicktour.mdx", "src/routes/training.mdx", "src/routes/tasks/sequence_classification.mdx", "src/routes/tasks/token_classification.mdx" ] }, "src/routes/__layout.svelte": { "file": "pages/__layout.svelte-hf-doc-builder.js", "src": "src/routes/__layout.svelte", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_paths-hf-doc-builder.js" ], "css": [ "assets/pages/__layout.svelte-hf-doc-builder.css" ] }, ".svelte-kit/runtime/components/error.svelte": { "file": "error.svelte-hf-doc-builder.js", "src": ".svelte-kit/runtime/components/error.svelte", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js" ] }, "src/routes/index.mdx": { "file": "pages/index.mdx-hf-doc-builder.js", "src": "src/routes/index.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_IconCopyLink-hf-doc-builder.js" ] }, "src/routes/pipeline_tutorial.mdx": { "file": "pages/pipeline_tutorial.mdx-hf-doc-builder.js", "src": "src/routes/pipeline_tutorial.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_Tip-hf-doc-builder.js", "_IconCopyLink-hf-doc-builder.js", "_CodeBlock-hf-doc-builder.js" ] }, "src/routes/fast_tokenizers.mdx": { "file": "pages/fast_tokenizers.mdx-hf-doc-builder.js", "src": "src/routes/fast_tokenizers.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", 
"_IconCopyLink-hf-doc-builder.js", "_CodeBlock-hf-doc-builder.js" ] }, "src/routes/create_a_model.mdx": { "file": "pages/create_a_model.mdx-hf-doc-builder.js", "src": "src/routes/create_a_model.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_Tip-hf-doc-builder.js", "_IconCopyLink-hf-doc-builder.js", "_CodeBlock-hf-doc-builder.js", "_Markdown-hf-doc-builder.js" ] }, "src/routes/installation.mdx": { "file": "pages/installation.mdx-hf-doc-builder.js", "src": "src/routes/installation.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_Tip-hf-doc-builder.js", "_IconCopyLink-hf-doc-builder.js", "_CodeBlock-hf-doc-builder.js" ] }, "src/routes/multilingual.mdx": { "file": "pages/multilingual.mdx-hf-doc-builder.js", "src": "src/routes/multilingual.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_IconCopyLink-hf-doc-builder.js", "_CodeBlock-hf-doc-builder.js", "_DocNotebookDropdown-hf-doc-builder.js" ] }, "src/routes/accelerate.mdx": { "file": "pages/accelerate.mdx-hf-doc-builder.js", "src": "src/routes/accelerate.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_IconCopyLink-hf-doc-builder.js", "_CodeBlock-hf-doc-builder.js" ] }, "src/routes/quicktour.mdx": { "file": "pages/quicktour.mdx-hf-doc-builder.js", "src": "src/routes/quicktour.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_Tip-hf-doc-builder.js", "_Youtube-hf-doc-builder.js", "_IconCopyLink-hf-doc-builder.js", "_CodeBlock-hf-doc-builder.js", "_DocNotebookDropdown-hf-doc-builder.js", "_Markdown-hf-doc-builder.js" ] }, "src/routes/training.mdx": { "file": "pages/training.mdx-hf-doc-builder.js", "src": "src/routes/training.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_Tip-hf-doc-builder.js", "_Youtube-hf-doc-builder.js", "_IconCopyLink-hf-doc-builder.js", 
"_CodeBlock-hf-doc-builder.js", "_DocNotebookDropdown-hf-doc-builder.js" ] }, "src/routes/tasks/sequence_classification.mdx": { "file": "pages/tasks/sequence_classification.mdx-hf-doc-builder.js", "src": "src/routes/tasks/sequence_classification.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_Tip-hf-doc-builder.js", "_Youtube-hf-doc-builder.js", "_IconCopyLink-hf-doc-builder.js", "_CodeBlock-hf-doc-builder.js", "_Markdown-hf-doc-builder.js" ] }, "src/routes/tasks/token_classification.mdx": { "file": "pages/tasks/token_classification.mdx-hf-doc-builder.js", "src": "src/routes/tasks/token_classification.mdx", "isEntry": true, "isDynamicEntry": true, "imports": [ "_vendor-hf-doc-builder.js", "_Tip-hf-doc-builder.js", "_Youtube-hf-doc-builder.js", "_IconCopyLink-hf-doc-builder.js", "_CodeBlock-hf-doc-builder.js", "_Markdown-hf-doc-builder.js" ] }, "_vendor-hf-doc-builder.js": { "file": "chunks/vendor-hf-doc-builder.js" }, "_paths-hf-doc-builder.js": { "file": "chunks/paths-hf-doc-builder.js" }, "_IconCopyLink-hf-doc-builder.js": { "file": "chunks/IconCopyLink-hf-doc-builder.js", "imports": [ "_vendor-hf-doc-builder.js" ] }, "_Tip-hf-doc-builder.js": { "file": "chunks/Tip-hf-doc-builder.js", "imports": [ "_vendor-hf-doc-builder.js" ] }, "_CodeBlock-hf-doc-builder.js": { "file": "chunks/CodeBlock-hf-doc-builder.js", "imports": [ "_vendor-hf-doc-builder.js" ] }, "_Markdown-hf-doc-builder.js": { "file": "chunks/Markdown-hf-doc-builder.js", "imports": [ "_vendor-hf-doc-builder.js" ] }, "_DocNotebookDropdown-hf-doc-builder.js": { "file": "chunks/DocNotebookDropdown-hf-doc-builder.js", "imports": [ "_vendor-hf-doc-builder.js" ] }, "_Youtube-hf-doc-builder.js": { "file": "chunks/Youtube-hf-doc-builder.js", "imports": [ "_vendor-hf-doc-builder.js" ] } }
468
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/error.svelte-hf-doc-builder.js
import{S as h,i as w,s as y,e as E,t as v,c as d,a as b,h as P,d as o,g as u,G as R,j as N,k as S,l as C,m as j,L as G}from"./chunks/vendor-hf-doc-builder.js";function H(r){let l,t=r[1].frame+"",a;return{c(){l=E("pre"),a=v(t)},l(f){l=d(f,"PRE",{});var s=b(l);a=P(s,t),s.forEach(o)},m(f,s){u(f,l,s),R(l,a)},p(f,s){s&2&&t!==(t=f[1].frame+"")&&N(a,t)},d(f){f&&o(l)}}}function L(r){let l,t=r[1].stack+"",a;return{c(){l=E("pre"),a=v(t)},l(f){l=d(f,"PRE",{});var s=b(l);a=P(s,t),s.forEach(o)},m(f,s){u(f,l,s),R(l,a)},p(f,s){s&2&&t!==(t=f[1].stack+"")&&N(a,t)},d(f){f&&o(l)}}}function z(r){let l,t,a,f,s=r[1].message+"",c,k,n,p,i=r[1].frame&&H(r),_=r[1].stack&&L(r);return{c(){l=E("h1"),t=v(r[0]),a=S(),f=E("pre"),c=v(s),k=S(),i&&i.c(),n=S(),_&&_.c(),p=C()},l(e){l=d(e,"H1",{});var m=b(l);t=P(m,r[0]),m.forEach(o),a=j(e),f=d(e,"PRE",{});var q=b(f);c=P(q,s),q.forEach(o),k=j(e),i&&i.l(e),n=j(e),_&&_.l(e),p=C()},m(e,m){u(e,l,m),R(l,t),u(e,a,m),u(e,f,m),R(f,c),u(e,k,m),i&&i.m(e,m),u(e,n,m),_&&_.m(e,m),u(e,p,m)},p(e,[m]){m&1&&N(t,e[0]),m&2&&s!==(s=e[1].message+"")&&N(c,s),e[1].frame?i?i.p(e,m):(i=H(e),i.c(),i.m(n.parentNode,n)):i&&(i.d(1),i=null),e[1].stack?_?_.p(e,m):(_=L(e),_.c(),_.m(p.parentNode,p)):_&&(_.d(1),_=null)},i:G,o:G,d(e){e&&o(l),e&&o(a),e&&o(f),e&&o(k),i&&i.d(e),e&&o(n),_&&_.d(e),e&&o(p)}}}function D({error:r,status:l}){return{props:{error:r,status:l}}}function A(r,l,t){let{status:a}=l,{error:f}=l;return r.$$set=s=>{"status"in s&&t(0,a=s.status),"error"in s&&t(1,f=s.error)},[a,f]}class F extends h{constructor(l){super();w(this,l,A,z,y,{status:0,error:1})}}export{F as default,D as load};
469
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/chunks/DocNotebookDropdown-hf-doc-builder.js
import{S as H,i as K,s as P,F as U,e as S,c as L,a as y,d as v,b as u,g as E,G as W,Q,Y as ge,H as q,I as z,J as F,q as h,o as g,v as be,X as Z,O as x,P as $,f as De,L as G,w as V,x as T,y as M,B as A,k as Y,m as j,n as J,p as O,Z as we,l as R,t as ke,h as ve,j as Ne,W as Ee,N as X,K as Ce}from"./vendor-hf-doc-builder.js";function Ie(s){let e,n,l,t,o,a;const f=s[7].default,i=U(f,s,s[6],null);return{c(){e=S("div"),n=S("ul"),i&&i.c(),this.h()},l(r){e=L(r,"DIV",{class:!0});var b=y(e);n=L(b,"UL",{class:!0});var D=y(n);i&&i.l(D),D.forEach(v),b.forEach(v),this.h()},h(){u(n,"class","min-w-full w-auto"),u(e,"class",l="absolute top-full mt-1 min-w-full w-auto bg-white rounded-xl overflow-hidden shadow-lg z-10 border border-gray-100 "+(s[2]==="right"?"right-0":"left-0")+" "+s[0])},m(r,b){E(r,e,b),W(e,n),i&&i.m(n,null),s[8](e),t=!0,o||(a=Q(e,"click",function(){ge(s[1])&&s[1].apply(this,arguments)}),o=!0)},p(r,[b]){s=r,i&&i.p&&(!t||b&64)&&q(i,f,s,s[6],t?F(f,s[6],b,null):z(s[6]),null),(!t||b&5&&l!==(l="absolute top-full mt-1 min-w-full w-auto bg-white rounded-xl overflow-hidden shadow-lg z-10 border border-gray-100 "+(s[2]==="right"?"right-0":"left-0")+" "+s[0]))&&u(e,"class",l)},i(r){t||(h(i,r),t=!0)},o(r){g(i,r),t=!1},d(r){r&&v(e),i&&i.d(r),s[8](null),o=!1,a()}}}function Se(s,e,n){let{$$slots:l={},$$scope:t}=e,{classNames:o=""}=e,{dropdownElement:a=void 0}=e,{forceAlignement:f=void 0}=e,{onClose:i}=e,r=f!=null?f:"left",b;be(()=>{var c,d;if(document.addEventListener("click",D),!f){const _=document.documentElement.clientWidth,m=(b==null?void 0:b.getBoundingClientRect())||{},N=(c=m.left)!=null?c:0,B=(d=m.width)!=null?d:0;n(2,r=N+B>_?"right":"left")}return()=>{document.removeEventListener("click",D)}});function D(c){const d=c.target;d!==a&&!(a==null?void 0:a.contains(d))&&i()}function p(c){Z[c?"unshift":"push"](()=>{b=c,n(3,b)})}return s.$$set=c=>{"classNames"in c&&n(0,o=c.classNames),"dropdownElement"in c&&n(4,a=c.dropdownElement),"forceAlignement"in 
c&&n(5,f=c.forceAlignement),"onClose"in c&&n(1,i=c.onClose),"$$scope"in c&&n(6,t=c.$$scope)},[o,i,r,b,a,f,t,l,p]}class Le extends H{constructor(e){super();K(this,e,Se,Ie,P,{classNames:0,dropdownElement:4,forceAlignement:5,onClose:1})}}function ye(s){let e,n;return{c(){e=x("svg"),n=x("path"),this.h()},l(l){e=$(l,"svg",{class:!0,xmlns:!0,"xmlns:xlink":!0,"aria-hidden":!0,focusable:!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0,style:!0});var t=y(e);n=$(t,"path",{d:!0,fill:!0}),y(n).forEach(v),t.forEach(v),this.h()},h(){u(n,"d","M7 10l5 5l5-5z"),u(n,"fill","currentColor"),u(e,"class",s[0]),u(e,"xmlns","http://www.w3.org/2000/svg"),u(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),u(e,"aria-hidden","true"),u(e,"focusable","false"),u(e,"role","img"),u(e,"width","1em"),u(e,"height","1em"),u(e,"preserveAspectRatio","xMidYMid meet"),u(e,"viewBox","0 0 24 24"),De(e,"transform","rotate(360deg)")},m(l,t){E(l,e,t),W(e,n)},p(l,[t]){t&1&&u(e,"class",l[0])},i:G,o:G,d(l){l&&v(e)}}}function Ve(s,e,n){let{classNames:l=""}=e;return s.$$set=t=>{"classNames"in t&&n(0,l=t.classNames)},[l]}class Me extends H{constructor(e){super();K(this,e,Ve,ye,P,{classNames:0})}}const Ae=s=>({}),ee=s=>({}),Je=s=>({}),te=s=>({});function Oe(s){let e,n,l,t,o,a=s[2]&&le(s),f=s[10]&&ne();return{c(){a&&a.c(),e=Y(),n=ke(s[4]),l=Y(),f&&f.c(),t=R()},l(i){a&&a.l(i),e=j(i),n=ve(i,s[4]),l=j(i),f&&f.l(i),t=R()},m(i,r){a&&a.m(i,r),E(i,e,r),E(i,n,r),E(i,l,r),f&&f.m(i,r),E(i,t,r),o=!0},p(i,r){i[2]?a?(a.p(i,r),r&4&&h(a,1)):(a=le(i),a.c(),h(a,1),a.m(e.parentNode,e)):a&&(J(),g(a,1,1,()=>{a=null}),O()),(!o||r&16)&&Ne(n,i[4]),i[10]?f?r&1024&&h(f,1):(f=ne(),f.c(),h(f,1),f.m(t.parentNode,t)):f&&(J(),g(f,1,1,()=>{f=null}),O())},i(i){o||(h(a),h(f),o=!0)},o(i){g(a),g(f),o=!1},d(i){a&&a.d(i),i&&v(e),i&&v(n),i&&v(l),f&&f.d(i),i&&v(t)}}}function Re(s){let e;const 
n=s[14].button,l=U(n,s,s[18],te);return{c(){l&&l.c()},l(t){l&&l.l(t)},m(t,o){l&&l.m(t,o),e=!0},p(t,o){l&&l.p&&(!e||o&262144)&&q(l,n,t,t[18],e?F(n,t[18],o,Je):z(t[18]),te)},i(t){e||(h(l,t),e=!0)},o(t){g(l,t),e=!1},d(t){l&&l.d(t)}}}function le(s){let e,n,l;var t=s[2];function o(a){return{props:{classNames:"mr-1.5 "+a[3]}}}return t&&(e=new t(o(s))),{c(){e&&V(e.$$.fragment),n=R()},l(a){e&&T(e.$$.fragment,a),n=R()},m(a,f){e&&M(e,a,f),E(a,n,f),l=!0},p(a,f){const i={};if(f&8&&(i.classNames="mr-1.5 "+a[3]),t!==(t=a[2])){if(e){J();const r=e;g(r.$$.fragment,1,0,()=>{A(r,1)}),O()}t?(e=new t(o(a)),V(e.$$.fragment),h(e.$$.fragment,1),M(e,n.parentNode,n)):e=null}else t&&e.$set(i)},i(a){l||(e&&h(e.$$.fragment,a),l=!0)},o(a){e&&g(e.$$.fragment,a),l=!1},d(a){a&&v(n),e&&A(e,a)}}}function ne(s){let e,n;return e=new Me({props:{classNames:"-mr-1 text-gray-500"}}),{c(){V(e.$$.fragment)},l(l){T(e.$$.fragment,l)},m(l,t){M(e,l,t),n=!0},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){g(e.$$.fragment,l),n=!1},d(l){A(e,l)}}}function se(s){let e,n;return e=new Le({props:{classNames:s[6]+" "+(s[9]?"v2-dropdown-menu hidden":""),dropdownElement:s[11],forceAlignement:s[5],onClose:s[16],$$slots:{default:[We]},$$scope:{ctx:s}}}),{c(){V(e.$$.fragment)},l(l){T(e.$$.fragment,l)},m(l,t){M(e,l,t),n=!0},p(l,t){const o={};t&576&&(o.classNames=l[6]+" "+(l[9]?"v2-dropdown-menu hidden":"")),t&2048&&(o.dropdownElement=l[11]),t&32&&(o.forceAlignement=l[5]),t&4096&&(o.onClose=l[16]),t&262144&&(o.$$scope={dirty:t,ctx:l}),e.$set(o)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){g(e.$$.fragment,l),n=!1},d(l){A(e,l)}}}function We(s){let e;const n=s[14].menu,l=U(n,s,s[18],ee);return{c(){l&&l.c()},l(t){l&&l.l(t)},m(t,o){l&&l.m(t,o),e=!0},p(t,o){l&&l.p&&(!e||o&262144)&&q(l,n,t,t[18],e?F(n,t[18],o,Ae):z(t[18]),ee)},i(t){e||(h(l,t),e=!0)},o(t){g(l,t),e=!1},d(t){l&&l.d(t)}}}function Ue(s){let e,n,l,t,o,a,f,i,r,b,D;const p=[Re,Oe],c=[];function d(m,N){return m[13].button?0:1}l=d(s),t=c[l]=p[l](s);let 
_=(s[12]||s[9])&&se(s);return{c(){e=S("div"),n=S("button"),t.c(),a=Y(),_&&_.c(),this.h()},l(m){e=L(m,"DIV",{class:!0,"selected-value":!0});var N=y(e);n=L(N,"BUTTON",{class:!0,type:!0});var B=y(n);t.l(B),B.forEach(v),a=j(N),_&&_.l(N),N.forEach(v),this.h()},h(){u(n,"class",o=""+s[1]+" "+(s[7]?"":"cursor-pointer w-full btn text-sm")+" "+(s[9]?"v2-dropdown-button":"")),u(n,"type","button"),u(e,"class",f="relative "+s[0]+" "+(s[9]?"v2-dropdown":"")),u(e,"selected-value",i=s[8]||void 0)},m(m,N){E(m,e,N),W(e,n),c[l].m(n,null),W(e,a),_&&_.m(e,null),s[17](e),r=!0,b||(D=Q(n,"click",s[15]),b=!0)},p(m,[N]){let B=l;l=d(m),l===B?c[l].p(m,N):(J(),g(c[B],1,1,()=>{c[B]=null}),O(),t=c[l],t?t.p(m,N):(t=c[l]=p[l](m),t.c()),h(t,1),t.m(n,null)),(!r||N&642&&o!==(o=""+m[1]+" "+(m[7]?"":"cursor-pointer w-full btn text-sm")+" "+(m[9]?"v2-dropdown-button":"")))&&u(n,"class",o),m[12]||m[9]?_?(_.p(m,N),N&4608&&h(_,1)):(_=se(m),_.c(),h(_,1),_.m(e,null)):_&&(J(),g(_,1,1,()=>{_=null}),O()),(!r||N&513&&f!==(f="relative "+m[0]+" "+(m[9]?"v2-dropdown":"")))&&u(e,"class",f),(!r||N&256&&i!==(i=m[8]||void 0))&&u(e,"selected-value",i)},i(m){r||(h(t),h(_),r=!0)},o(m){g(t),g(_),r=!1},d(m){m&&v(e),c[l].d(),_&&_.d(),s[17](null),b=!1,D()}}}function qe(s,e,n){let{$$slots:l={},$$scope:t}=e;const o=we(l);let{classNames:a=""}=e,{btnClassNames:f=""}=e,{btnIcon:i=void 0}=e,{btnIconClassNames:r=""}=e,{btnLabel:b=""}=e,{forceMenuAlignement:D=void 0}=e,{menuClassNames:p=""}=e,{noBtnClass:c=void 0}=e,{selectedValue:d=void 0}=e,{useDeprecatedJS:_=!0}=e,{withBtnCaret:m=!1}=e,N,B=!1;const w=()=>n(12,B=!B),k=()=>n(12,B=!1);function I(C){Z[C?"unshift":"push"](()=>{N=C,n(11,N)})}return s.$$set=C=>{"classNames"in C&&n(0,a=C.classNames),"btnClassNames"in C&&n(1,f=C.btnClassNames),"btnIcon"in C&&n(2,i=C.btnIcon),"btnIconClassNames"in C&&n(3,r=C.btnIconClassNames),"btnLabel"in C&&n(4,b=C.btnLabel),"forceMenuAlignement"in C&&n(5,D=C.forceMenuAlignement),"menuClassNames"in C&&n(6,p=C.menuClassNames),"noBtnClass"in 
C&&n(7,c=C.noBtnClass),"selectedValue"in C&&n(8,d=C.selectedValue),"useDeprecatedJS"in C&&n(9,_=C.useDeprecatedJS),"withBtnCaret"in C&&n(10,m=C.withBtnCaret),"$$scope"in C&&n(18,t=C.$$scope)},[a,f,i,r,b,D,p,c,d,_,m,N,B,o,l,w,k,I,t]}class pe extends H{constructor(e){super();K(this,e,qe,Ue,P,{classNames:0,btnClassNames:1,btnIcon:2,btnIconClassNames:3,btnLabel:4,forceMenuAlignement:5,menuClassNames:6,noBtnClass:7,selectedValue:8,useDeprecatedJS:9,withBtnCaret:10})}}function ze(s){let e,n,l,t=s[5]&&ae(s);return{c(){t&&t.c(),e=Y(),n=ke(s[7])},l(o){t&&t.l(o),e=j(o),n=ve(o,s[7])},m(o,a){t&&t.m(o,a),E(o,e,a),E(o,n,a),l=!0},p(o,a){o[5]?t?(t.p(o,a),a&32&&h(t,1)):(t=ae(o),t.c(),h(t,1),t.m(e.parentNode,e)):t&&(J(),g(t,1,1,()=>{t=null}),O()),(!l||a&128)&&Ne(n,o[7])},i(o){l||(h(t),l=!0)},o(o){g(t),l=!1},d(o){t&&t.d(o),o&&v(e),o&&v(n)}}}function Fe(s){let e;const n=s[15].default,l=U(n,s,s[14],null);return{c(){l&&l.c()},l(t){l&&l.l(t)},m(t,o){l&&l.m(t,o),e=!0},p(t,o){l&&l.p&&(!e||o&16384)&&q(l,n,t,t[14],e?F(n,t[14],o,null):z(t[14]),null)},i(t){e||(h(l,t),e=!0)},o(t){g(l,t),e=!1},d(t){l&&l.d(t)}}}function ae(s){let e,n,l;var t=s[5];function o(a){return{props:{classNames:"mr-1.5 "+a[6]}}}return t&&(e=new t(o(s))),{c(){e&&V(e.$$.fragment),n=R()},l(a){e&&T(e.$$.fragment,a),n=R()},m(a,f){e&&M(e,a,f),E(a,n,f),l=!0},p(a,f){const i={};if(f&64&&(i.classNames="mr-1.5 "+a[6]),t!==(t=a[5])){if(e){J();const r=e;g(r.$$.fragment,1,0,()=>{A(r,1)}),O()}t?(e=new t(o(a)),V(e.$$.fragment),h(e.$$.fragment,1),M(e,n.parentNode,n)):e=null}else t&&e.$set(i)},i(a){l||(e&&h(e.$$.fragment,a),l=!0)},o(a){e&&g(e.$$.fragment,a),l=!1},d(a){a&&v(n),e&&A(e,a)}}}function Ge(s){let e,n,l,t,o,a,f,i,r,b;const D=[Fe,ze],p=[];function c(d,_){return d[13].default?0:1}return l=c(s),t=p[l]=D[l](s),{c(){e=S("li"),n=S("a"),t.c(),this.h()},l(d){e=L(d,"LI",{});var _=y(e);n=L(_,"A",{class:!0,"data-label":!0,"data-url":!0,"data-value":!0,href:!0,rel:!0,target:!0});var 
m=y(n);t.l(m),m.forEach(v),_.forEach(v),this.h()},h(){u(n,"class",o="flex items-center hover:bg-gray-50 dark:hover:bg-gray-800 cursor-pointer px-3 py-1.5 whitespace-nowrap "+s[0]+" "+(s[9]?"hover:underline":"")+" "+(s[12]?"v2-dropdown-entry":"")),u(n,"data-label",s[1]),u(n,"data-url",s[2]),u(n,"data-value",s[3]),u(n,"href",s[4]),u(n,"rel",a=s[8]?"nofollow":void 0),u(n,"target",f=s[11]?"_blank":void 0)},m(d,_){E(d,e,_),W(e,n),p[l].m(n,null),i=!0,r||(b=Q(n,"click",function(){ge(s[10])&&s[10].apply(this,arguments)}),r=!0)},p(d,[_]){s=d;let m=l;l=c(s),l===m?p[l].p(s,_):(J(),g(p[m],1,1,()=>{p[m]=null}),O(),t=p[l],t?t.p(s,_):(t=p[l]=D[l](s),t.c()),h(t,1),t.m(n,null)),(!i||_&4609&&o!==(o="flex items-center hover:bg-gray-50 dark:hover:bg-gray-800 cursor-pointer px-3 py-1.5 whitespace-nowrap "+s[0]+" "+(s[9]?"hover:underline":"")+" "+(s[12]?"v2-dropdown-entry":"")))&&u(n,"class",o),(!i||_&2)&&u(n,"data-label",s[1]),(!i||_&4)&&u(n,"data-url",s[2]),(!i||_&8)&&u(n,"data-value",s[3]),(!i||_&16)&&u(n,"href",s[4]),(!i||_&256&&a!==(a=s[8]?"nofollow":void 0))&&u(n,"rel",a),(!i||_&2048&&f!==(f=s[11]?"_blank":void 0))&&u(n,"target",f)},i(d){i||(h(t),i=!0)},o(d){g(t),i=!1},d(d){d&&v(e),p[l].d(),r=!1,b()}}}function Te(s,e,n){let{$$slots:l={},$$scope:t}=e;const o=we(l);let{classNames:a=""}=e,{dataLabel:f=void 0}=e,{dataUrl:i=void 0}=e,{dataValue:r=void 0}=e,{href:b=void 0}=e,{icon:D=void 0}=e,{iconClassNames:p=""}=e,{label:c=""}=e,{noFollow:d=!1}=e,{underline:_=!1}=e,{onClick:m=()=>{}}=e,{targetBlank:N=!1}=e,{useDeprecatedJS:B=!0}=e;return s.$$set=w=>{"classNames"in w&&n(0,a=w.classNames),"dataLabel"in w&&n(1,f=w.dataLabel),"dataUrl"in w&&n(2,i=w.dataUrl),"dataValue"in w&&n(3,r=w.dataValue),"href"in w&&n(4,b=w.href),"icon"in w&&n(5,D=w.icon),"iconClassNames"in w&&n(6,p=w.iconClassNames),"label"in w&&n(7,c=w.label),"noFollow"in w&&n(8,d=w.noFollow),"underline"in w&&n(9,_=w.underline),"onClick"in w&&n(10,m=w.onClick),"targetBlank"in w&&n(11,N=w.targetBlank),"useDeprecatedJS"in 
w&&n(12,B=w.useDeprecatedJS),"$$scope"in w&&n(14,t=w.$$scope)},[a,f,i,r,b,D,p,c,d,_,m,N,B,o,t,l]}class Be extends H{constructor(e){super();K(this,e,Te,Ge,P,{classNames:0,dataLabel:1,dataUrl:2,dataValue:3,href:4,icon:5,iconClassNames:6,label:7,noFollow:8,underline:9,onClick:10,targetBlank:11,useDeprecatedJS:12})}}const{window:Ye}=Ee,je=s=>({}),oe=s=>({slot:"button"});function ie(s,e,n){const l=s.slice();return l[11]=e[n].label,l[12]=e[n].value,l}const He=s=>({}),fe=s=>({slot:"menu"}),Ke=s=>({}),re=s=>({slot:"button"});function ue(s,e,n){const l=s.slice();return l[11]=e[n].label,l[12]=e[n].value,l}const Pe=s=>({}),ce=s=>({slot:"menu"}),Qe=s=>({}),_e=s=>({slot:"menu"});function Xe(s){let e,n;return e=new pe({props:{btnLabel:"",classNames:"colab-dropdown",noBtnClass:!0,useDeprecatedJS:!1,$$slots:{menu:[tt],button:[$e]},$$scope:{ctx:s}}}),{c(){V(e.$$.fragment)},l(l){T(e.$$.fragment,l)},m(l,t){M(e,l,t),n=!0},p(l,t){const o={};t&1024&&(o.$$scope={dirty:t,ctx:l}),e.$set(o)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){g(e.$$.fragment,l),n=!1},d(l){A(e,l)}}}function Ze(s){let e,n,l;return{c(){e=S("a"),n=S("img"),this.h()},l(t){e=L(t,"A",{href:!0,target:!0});var o=y(e);n=L(o,"IMG",{alt:!0,class:!0,src:!0}),o.forEach(v),this.h()},h(){u(n,"alt","Open In Colab"),u(n,"class","!m-0"),X(n.src,l="https://colab.research.google.com/assets/colab-badge.svg")||u(n,"src",l),u(e,"href",s[2][0].value),u(e,"target","_blank")},m(t,o){E(t,e,o),W(e,n)},p:G,i:G,o:G,d(t){t&&v(e)}}}function xe(s){let e,n;return{c(){e=S("img"),this.h()},l(l){e=L(l,"IMG",{alt:!0,class:!0,src:!0}),this.h()},h(){u(e,"alt","Open In Colab"),u(e,"class","!m-0"),X(e.src,n="https://colab.research.google.com/assets/colab-badge.svg")||u(e,"src",n)},m(l,t){E(l,e,t)},d(l){l&&v(e)}}}function $e(s){let e;const 
n=s[6].default,l=U(n,s,s[10],re),t=l||xe();return{c(){t&&t.c()},l(o){t&&t.l(o)},m(o,a){t&&t.m(o,a),e=!0},p(o,a){l&&l.p&&(!e||a&1024)&&q(l,n,o,o[10],e?F(n,o[10],a,Ke):z(o[10]),re)},i(o){e||(h(t,o),e=!0)},o(o){g(t,o),e=!1},d(o){t&&t.d(o)}}}function de(s){let e,n;function l(){return s[7](s[12])}return e=new Be({props:{classNames:"text-sm !no-underline",iconClassNames:"text-gray-500",label:s[11],onClick:l,useDeprecatedJS:!1}}),{c(){V(e.$$.fragment)},l(t){T(e.$$.fragment,t)},m(t,o){M(e,t,o),n=!0},p(t,o){s=t},i(t){n||(h(e.$$.fragment,t),n=!0)},o(t){g(e.$$.fragment,t),n=!1},d(t){A(e,t)}}}function et(s){let e,n,l=s[2],t=[];for(let a=0;a<l.length;a+=1)t[a]=de(ue(s,l,a));const o=a=>g(t[a],1,1,()=>{t[a]=null});return{c(){for(let a=0;a<t.length;a+=1)t[a].c();e=R()},l(a){for(let f=0;f<t.length;f+=1)t[f].l(a);e=R()},m(a,f){for(let i=0;i<t.length;i+=1)t[i].m(a,f);E(a,e,f),n=!0},p(a,f){if(f&4){l=a[2];let i;for(i=0;i<l.length;i+=1){const r=ue(a,l,i);t[i]?(t[i].p(r,f),h(t[i],1)):(t[i]=de(r),t[i].c(),h(t[i],1),t[i].m(e.parentNode,e))}for(J(),i=l.length;i<t.length;i+=1)o(i);O()}},i(a){if(!n){for(let f=0;f<l.length;f+=1)h(t[f]);n=!0}},o(a){t=t.filter(Boolean);for(let f=0;f<t.length;f+=1)g(t[f]);n=!1},d(a){Ce(t,a),a&&v(e)}}}function tt(s){let e;const n=s[6].default,l=U(n,s,s[10],ce),t=l||et(s);return{c(){t&&t.c()},l(o){t&&t.l(o)},m(o,a){t&&t.m(o,a),e=!0},p(o,a){l&&l.p&&(!e||a&1024)&&q(l,n,o,o[10],e?F(n,o[10],a,Pe):z(o[10]),ce)},i(o){e||(h(t,o),e=!0)},o(o){g(t,o),e=!1},d(o){t&&t.d(o)}}}function lt(s){let e,n;return e=new pe({props:{btnLabel:"",classNames:"colab-dropdown",noBtnClass:!0,useDeprecatedJS:!1,$$slots:{menu:[it],button:[at]},$$scope:{ctx:s}}}),{c(){V(e.$$.fragment)},l(l){T(e.$$.fragment,l)},m(l,t){M(e,l,t),n=!0},p(l,t){const o={};t&1024&&(o.$$scope={dirty:t,ctx:l}),e.$set(o)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){g(e.$$.fragment,l),n=!1},d(l){A(e,l)}}}function nt(s){let e,n,l;return{c(){e=S("a"),n=S("img"),this.h()},l(t){e=L(t,"A",{href:!0,target:!0});var 
o=y(e);n=L(o,"IMG",{alt:!0,class:!0,src:!0}),o.forEach(v),this.h()},h(){u(n,"alt","Open In Studio Lab"),u(n,"class","!m-0"),X(n.src,l="https://studiolab.sagemaker.aws/studiolab.svg")||u(n,"src",l),u(e,"href",s[3][0].value),u(e,"target","_blank")},m(t,o){E(t,e,o),W(e,n)},p:G,i:G,o:G,d(t){t&&v(e)}}}function st(s){let e,n;return{c(){e=S("img"),this.h()},l(l){e=L(l,"IMG",{alt:!0,class:!0,src:!0}),this.h()},h(){u(e,"alt","Open In Studio Lab"),u(e,"class","!m-0"),X(e.src,n="https://studiolab.sagemaker.aws/studiolab.svg")||u(e,"src",n)},m(l,t){E(l,e,t)},d(l){l&&v(e)}}}function at(s){let e;const n=s[6].default,l=U(n,s,s[10],oe),t=l||st();return{c(){t&&t.c()},l(o){t&&t.l(o)},m(o,a){t&&t.m(o,a),e=!0},p(o,a){l&&l.p&&(!e||a&1024)&&q(l,n,o,o[10],e?F(n,o[10],a,je):z(o[10]),oe)},i(o){e||(h(t,o),e=!0)},o(o){g(t,o),e=!1},d(o){t&&t.d(o)}}}function me(s){let e,n;function l(){return s[8](s[12])}return e=new Be({props:{classNames:"text-sm !no-underline",iconClassNames:"text-gray-500",label:s[11],onClick:l,useDeprecatedJS:!1}}),{c(){V(e.$$.fragment)},l(t){T(e.$$.fragment,t)},m(t,o){M(e,t,o),n=!0},p(t,o){s=t},i(t){n||(h(e.$$.fragment,t),n=!0)},o(t){g(e.$$.fragment,t),n=!1},d(t){A(e,t)}}}function ot(s){let e,n,l=s[3],t=[];for(let a=0;a<l.length;a+=1)t[a]=me(ie(s,l,a));const o=a=>g(t[a],1,1,()=>{t[a]=null});return{c(){for(let a=0;a<t.length;a+=1)t[a].c();e=R()},l(a){for(let f=0;f<t.length;f+=1)t[f].l(a);e=R()},m(a,f){for(let i=0;i<t.length;i+=1)t[i].m(a,f);E(a,e,f),n=!0},p(a,f){if(f&8){l=a[3];let i;for(i=0;i<l.length;i+=1){const r=ie(a,l,i);t[i]?(t[i].p(r,f),h(t[i],1)):(t[i]=me(r),t[i].c(),h(t[i],1),t[i].m(e.parentNode,e))}for(J(),i=l.length;i<t.length;i+=1)o(i);O()}},i(a){if(!n){for(let f=0;f<l.length;f+=1)h(t[f]);n=!0}},o(a){t=t.filter(Boolean);for(let f=0;f<t.length;f+=1)g(t[f]);n=!1},d(a){Ce(t,a),a&&v(e)}}}function it(s){let e;const 
n=s[6].default,l=U(n,s,s[10],fe),t=l||ot(s);return{c(){t&&t.c()},l(o){t&&t.l(o)},m(o,a){t&&t.m(o,a),e=!0},p(o,a){l&&l.p&&(!e||a&1024)&&q(l,n,o,o[10],e?F(n,o[10],a,He):z(o[10]),fe)},i(o){e||(h(t,o),e=!0)},o(o){g(t,o),e=!1},d(o){t&&t.d(o)}}}function ft(s){let e,n,l,t,o,a,f,i,r,b,D;const p=s[6].alwaysVisible,c=U(p,s,s[10],_e),d=[Ze,Xe],_=[];function m(k,I){return k[2].length===1?0:k[2].length>1?1:-1}~(l=m(s))&&(t=_[l]=d[l](s));const N=[nt,lt],B=[];function w(k,I){return k[3].length===1?0:k[3].length>1?1:-1}return~(a=w(s))&&(f=B[a]=N[a](s)),{c(){e=S("div"),c&&c.c(),n=Y(),t&&t.c(),o=Y(),f&&f.c(),this.h()},l(k){e=L(k,"DIV",{class:!0});var I=y(e);c&&c.l(I),n=j(I),t&&t.l(I),o=j(I),f&&f.l(I),I.forEach(v),this.h()},h(){u(e,"class",i="flex space-x-1 "+s[0])},m(k,I){E(k,e,I),c&&c.m(e,null),W(e,n),~l&&_[l].m(e,null),W(e,o),~a&&B[a].m(e,null),s[9](e),r=!0,b||(D=Q(Ye,"resize",s[4]),b=!0)},p(k,[I]){c&&c.p&&(!r||I&1024)&&q(c,p,k,k[10],r?F(p,k[10],I,Qe):z(k[10]),_e),t&&t.p(k,I),f&&f.p(k,I),(!r||I&1&&i!==(i="flex space-x-1 "+k[0]))&&u(e,"class",i)},i(k){r||(h(c,k),h(t),h(f),r=!0)},o(k){g(c,k),g(t),g(f),r=!1},d(k){k&&v(e),c&&c.d(k),~l&&_[l].d(),~a&&B[a].d(),s[9](null),b=!1,D()}}}function he(s){window.open(s)}function rt(s,e,n){let{$$slots:l={},$$scope:t}=e,{options:o=[]}=e,{classNames:a=""}=e,f;const i=o.filter(d=>d.value.includes("colab.research.google.com")),r=o.filter(d=>d.value.includes("studiolab.sagemaker.aws"));function b(){const d=document.querySelector(".prose-doc h1"),_=document.querySelector(".prose-doc h1 > span");if(d&&_){const{width:m}=d.getBoundingClientRect(),{width:N}=_.getBoundingClientRect();let B=0;for(let k=0;k<f.children.length;k++)B+=f.children.item(k).clientWidth;const w=20;m-N<B+w?f.classList.remove("absolute"):f.classList.add("absolute")}}be(()=>{b()});const D=d=>he(d),p=d=>he(d);function c(d){Z[d?"unshift":"push"](()=>{f=d,n(1,f)})}return s.$$set=d=>{"options"in d&&n(5,o=d.options),"classNames"in d&&n(0,a=d.classNames),"$$scope"in 
d&&n(10,t=d.$$scope)},[a,f,i,r,b,o,l,D,p,c,t]}class ct extends H{constructor(e){super();K(this,e,rt,ft,P,{options:5,classNames:0})}}export{ct as D};
470
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js
import{S as m,i as u,s as d,O as n,P as o,a as h,d as i,b as a,g as p,G as g,L as c}from"./vendor-hf-doc-builder.js";function v(l){let e,r;return{c(){e=n("svg"),r=n("path"),this.h()},l(t){e=o(t,"svg",{class:!0,xmlns:!0,"xmlns:xlink":!0,"aria-hidden":!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0});var s=h(e);r=o(s,"path",{d:!0,fill:!0}),h(r).forEach(i),s.forEach(i),this.h()},h(){a(r,"d","M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z"),a(r,"fill","currentColor"),a(e,"class",l[0]),a(e,"xmlns","http://www.w3.org/2000/svg"),a(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),a(e,"aria-hidden","true"),a(e,"role","img"),a(e,"width","1em"),a(e,"height","1em"),a(e,"preserveAspectRatio","xMidYMid meet"),a(e,"viewBox","0 0 256 256")},m(t,s){p(t,e,s),g(e,r)},p(t,[s]){s&1&&a(e,"class",t[0])},i:c,o:c,d(t){t&&i(e)}}}function f(l,e,r){let{classNames:t=""}=e;return l.$$set=s=>{"classNames"in s&&r(0,t=s.classNames)},[t]}class x extends m{constructor(e){super();u(this,e,f,v,d,{classNames:0})}}export{x as I};
471
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/chunks/Youtube-hf-doc-builder.js
import{S as u,i as n,s as m,e as d,c as f,a as h,d as o,b as s,N as c,g as p,L as l}from"./vendor-hf-doc-builder.js";function w(i){let e,r;return{c(){e=d("iframe"),this.h()},l(t){e=f(t,"IFRAME",{class:!0,src:!0,title:!0,frameborder:!0,allow:!0}),h(e).forEach(o),this.h()},h(){s(e,"class","w-full xl:w-4/6 h-80"),c(e.src,r="https://www.youtube-nocookie.com/embed/"+i[0])||s(e,"src",r),s(e,"title","YouTube video player"),s(e,"frameborder","0"),s(e,"allow","accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"),e.allowFullscreen=!0},m(t,a){p(t,e,a)},p(t,[a]){a&1&&!c(e.src,r="https://www.youtube-nocookie.com/embed/"+t[0])&&s(e,"src",r)},i:l,o:l,d(t){t&&o(e)}}}function b(i,e,r){let{id:t}=e;return i.$$set=a=>{"id"in a&&r(0,t=a.id)},[t]}class y extends u{constructor(e){super();n(this,e,b,w,m,{id:0})}}export{y as Y};
472
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/chunks/Tip-hf-doc-builder.js
import{S as f,i as d,s as u,F as _,e as g,c,a as m,d as l,b,g as h,H as p,I as k,J as w,q as v,o as y}from"./vendor-hf-doc-builder.js";function q(t){let a,s;const o=t[3].default,r=_(o,t,t[2],null);return{c(){a=g("div"),r&&r.c(),this.h()},l(e){a=c(e,"DIV",{class:!0});var n=m(a);r&&r.l(n),n.forEach(l),this.h()},h(){b(a,"class","course-tip "+(t[0]==="orange"?"course-tip-orange":"")+" bg-gradient-to-br dark:bg-gradient-to-r before:border-"+t[0]+"-500 dark:before:border-"+t[0]+"-800 from-"+t[0]+"-50 dark:from-gray-900 to-white dark:to-gray-950 border border-"+t[0]+"-50 text-"+t[0]+"-700 dark:text-gray-400")},m(e,n){h(e,a,n),r&&r.m(a,null),s=!0},p(e,[n]){r&&r.p&&(!s||n&4)&&p(r,o,e,e[2],s?w(o,e[2],n,null):k(e[2]),null)},i(e){s||(v(r,e),s=!0)},o(e){y(r,e),s=!1},d(e){e&&l(a),r&&r.d(e)}}}function I(t,a,s){let{$$slots:o={},$$scope:r}=a,{warning:e=!1}=a;const n=e?"orange":"green";return t.$$set=i=>{"warning"in i&&s(1,e=i.warning),"$$scope"in i&&s(2,r=i.$$scope)},[n,e,r,o]}class T extends f{constructor(a){super();d(this,a,I,q,u,{warning:1})}}export{T};
473
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js
function h(){}function I(t,n){for(const e in n)t[e]=n[e];return t}function B(t){return t()}function T(){return Object.create(null)}function b(t){t.forEach(B)}function W(t){return typeof t=="function"}function G(t,n){return t!=t?n==n:t!==n||t&&typeof t=="object"||typeof t=="function"}let x;function lt(t,n){return x||(x=document.createElement("a")),x.href=n,t===x.href}function J(t){return Object.keys(t).length===0}function K(t,...n){if(t==null)return h;const e=t.subscribe(...n);return e.unsubscribe?()=>e.unsubscribe():e}function ft(t,n,e){t.$$.on_destroy.push(K(n,e))}function at(t,n,e,i){if(t){const c=L(t,n,e,i);return t[0](c)}}function L(t,n,e,i){return t[1]&&i?I(e.ctx.slice(),t[1](i(n))):e.ctx}function dt(t,n,e,i){if(t[2]&&i){const c=t[2](i(e));if(n.dirty===void 0)return c;if(typeof c=="object"){const l=[],o=Math.max(n.dirty.length,c.length);for(let u=0;u<o;u+=1)l[u]=n.dirty[u]|c[u];return l}return n.dirty|c}return n.dirty}function _t(t,n,e,i,c,l){if(c){const o=L(n,e,i,l);t.p(o,c)}}function ht(t){if(t.ctx.length>32){const n=[],e=t.ctx.length/32;for(let i=0;i<e;i++)n[i]=-1;return n}return-1}function mt(t){const n={};for(const e in t)n[e]=!0;return n}function pt(t,n,e){return t.set(e),n}let k=!1;function Q(){k=!0}function R(){k=!1}function U(t,n,e,i){for(;t<n;){const c=t+(n-t>>1);e(c)<=i?t=c+1:n=c}return t}function V(t){if(t.hydrate_init)return;t.hydrate_init=!0;let n=t.childNodes;if(t.nodeName==="HEAD"){const r=[];for(let s=0;s<n.length;s++){const a=n[s];a.claim_order!==void 0&&r.push(a)}n=r}const e=new Int32Array(n.length+1),i=new Int32Array(n.length);e[0]=-1;let c=0;for(let r=0;r<n.length;r++){const s=n[r].claim_order,a=(c>0&&n[e[c]].claim_order<=s?c+1:U(1,c,g=>n[e[g]].claim_order,s))-1;i[r]=e[a]+1;const f=a+1;e[f]=r,c=Math.max(f,c)}const l=[],o=[];let u=n.length-1;for(let r=e[c]+1;r!=0;r=i[r-1]){for(l.push(n[r-1]);u>=r;u--)o.push(n[u]);u--}for(;u>=0;u--)o.push(n[u]);l.reverse(),o.sort((r,s)=>r.claim_order-s.claim_order);for(let 
r=0,s=0;r<o.length;r++){for(;s<l.length&&o[r].claim_order>=l[s].claim_order;)s++;const a=s<l.length?l[s]:null;t.insertBefore(o[r],a)}}function X(t,n){if(k){for(V(t),(t.actual_end_child===void 0||t.actual_end_child!==null&&t.actual_end_child.parentElement!==t)&&(t.actual_end_child=t.firstChild);t.actual_end_child!==null&&t.actual_end_child.claim_order===void 0;)t.actual_end_child=t.actual_end_child.nextSibling;n!==t.actual_end_child?(n.claim_order!==void 0||n.parentNode!==t)&&t.insertBefore(n,t.actual_end_child):t.actual_end_child=n.nextSibling}else(n.parentNode!==t||n.nextSibling!==null)&&t.appendChild(n)}function yt(t,n,e){k&&!e?X(t,n):(n.parentNode!==t||n.nextSibling!=e)&&t.insertBefore(n,e||null)}function Y(t){t.parentNode.removeChild(t)}function bt(t,n){for(let e=0;e<t.length;e+=1)t[e]&&t[e].d(n)}function Z(t){return document.createElement(t)}function tt(t){return document.createElementNS("http://www.w3.org/2000/svg",t)}function v(t){return document.createTextNode(t)}function gt(){return v(" ")}function xt(){return v("")}function wt(t,n,e,i){return t.addEventListener(n,e,i),()=>t.removeEventListener(n,e,i)}function $t(t,n,e){e==null?t.removeAttribute(n):t.getAttribute(n)!==e&&t.setAttribute(n,e)}function nt(t){return Array.from(t.childNodes)}function et(t){t.claim_info===void 0&&(t.claim_info={last_index:0,total_claimed:0})}function O(t,n,e,i,c=!1){et(t);const l=(()=>{for(let o=t.claim_info.last_index;o<t.length;o++){const u=t[o];if(n(u)){const r=e(u);return r===void 0?t.splice(o,1):t[o]=r,c||(t.claim_info.last_index=o),u}}for(let o=t.claim_info.last_index-1;o>=0;o--){const u=t[o];if(n(u)){const r=e(u);return r===void 0?t.splice(o,1):t[o]=r,c?r===void 0&&t.claim_info.last_index--:t.claim_info.last_index=o,u}}return i()})();return l.claim_order=t.claim_info.total_claimed,t.claim_info.total_claimed+=1,l}function P(t,n,e,i){return O(t,c=>c.nodeName===n,c=>{const l=[];for(let o=0;o<c.attributes.length;o++){const 
u=c.attributes[o];e[u.name]||l.push(u.name)}l.forEach(o=>c.removeAttribute(o))},()=>i(n))}function Et(t,n,e){return P(t,n,e,Z)}function kt(t,n,e){return P(t,n,e,tt)}function it(t,n){return O(t,e=>e.nodeType===3,e=>{const i=""+n;if(e.data.startsWith(i)){if(e.data.length!==i.length)return e.splitText(i.length)}else e.data=i},()=>v(n),!0)}function St(t){return it(t," ")}function At(t,n){n=""+n,t.wholeText!==n&&(t.data=n)}function Nt(t,n,e,i){e===null?t.style.removeProperty(n):t.style.setProperty(n,e,i?"important":"")}function jt(t,n=document.body){return Array.from(n.querySelectorAll(t))}let y;function p(t){y=t}function S(){if(!y)throw new Error("Function called outside component initialization");return y}function vt(t){S().$$.on_mount.push(t)}function qt(t){S().$$.after_update.push(t)}function Ct(t){S().$$.on_destroy.push(t)}function Tt(t,n){S().$$.context.set(t,n)}const m=[],M=[],$=[],z=[],D=Promise.resolve();let N=!1;function F(){N||(N=!0,D.then(H))}function Mt(){return F(),D}function j(t){$.push(t)}const A=new Set;let w=0;function H(){const t=y;do{for(;w<m.length;){const n=m[w];w++,p(n),rt(n.$$)}for(p(null),m.length=0,w=0;M.length;)M.pop()();for(let n=0;n<$.length;n+=1){const e=$[n];A.has(e)||(A.add(e),e())}$.length=0}while(m.length);for(;z.length;)z.pop()();N=!1,A.clear(),p(t)}function rt(t){if(t.fragment!==null){t.update(),b(t.before_update);const n=t.dirty;t.dirty=[-1],t.fragment&&t.fragment.p(t.ctx,n),t.after_update.forEach(j)}}const E=new Set;let d;function zt(){d={r:0,c:[],p:d}}function Bt(){d.r||b(d.c),d=d.p}function ct(t,n){t&&t.i&&(E.delete(t),t.i(n))}function Lt(t,n,e,i){if(t&&t.o){if(E.has(t))return;E.add(t),d.c.push(()=>{E.delete(t),i&&(e&&t.d(1),i())}),t.o(n)}}const Ot=typeof window!="undefined"?window:typeof globalThis!="undefined"?globalThis:global;function Pt(t,n){const e={},i={},c={$$scope:1};let l=t.length;for(;l--;){const o=t[l],u=n[l];if(u){for(const r in o)r in u||(i[r]=1);for(const r in u)c[r]||(e[r]=u[r],c[r]=1);t[l]=u}else for(const r in 
o)c[r]=1}for(const o in i)o in e||(e[o]=void 0);return e}function Dt(t){return typeof t=="object"&&t!==null?t:{}}function Ft(t){t&&t.c()}function Ht(t,n){t&&t.l(n)}function ot(t,n,e,i){const{fragment:c,on_mount:l,on_destroy:o,after_update:u}=t.$$;c&&c.m(n,e),i||j(()=>{const r=l.map(B).filter(W);o?o.push(...r):b(r),t.$$.on_mount=[]}),u.forEach(j)}function ut(t,n){const e=t.$$;e.fragment!==null&&(b(e.on_destroy),e.fragment&&e.fragment.d(n),e.on_destroy=e.fragment=null,e.ctx=[])}function st(t,n){t.$$.dirty[0]===-1&&(m.push(t),F(),t.$$.dirty.fill(0)),t.$$.dirty[n/31|0]|=1<<n%31}function It(t,n,e,i,c,l,o,u=[-1]){const r=y;p(t);const s=t.$$={fragment:null,ctx:null,props:l,update:h,not_equal:c,bound:T(),on_mount:[],on_destroy:[],on_disconnect:[],before_update:[],after_update:[],context:new Map(n.context||(r?r.$$.context:[])),callbacks:T(),dirty:u,skip_bound:!1,root:n.target||r.$$.root};o&&o(s.root);let a=!1;if(s.ctx=e?e(t,n.props||{},(f,g,...q)=>{const C=q.length?q[0]:g;return s.ctx&&c(s.ctx[f],s.ctx[f]=C)&&(!s.skip_bound&&s.bound[f]&&s.bound[f](C),a&&st(t,f)),g}):[],s.update(),a=!0,b(s.before_update),s.fragment=i?i(s.ctx):!1,n.target){if(n.hydrate){Q();const f=nt(n.target);s.fragment&&s.fragment.l(f),f.forEach(Y)}else s.fragment&&s.fragment.c();n.intro&&ct(t.$$.fragment),ot(t,n.target,n.anchor,n.customElement),R(),H()}p(r)}class Wt{$destroy(){ut(this,1),this.$destroy=h}$on(n,e){const i=this.$$.callbacks[n]||(this.$$.callbacks[n]=[]);return i.push(e),()=>{const c=i.indexOf(e);c!==-1&&i.splice(c,1)}}$set(n){this.$$set&&!J(n)&&(this.$$.skip_bound=!0,this.$$set(n),this.$$.skip_bound=!1)}}const _=[];function Gt(t,n=h){let e;const i=new Set;function c(u){if(G(t,u)&&(t=u,e)){const r=!_.length;for(const s of i)s[1](),_.push(s,t);if(r){for(let s=0;s<_.length;s+=2)_[s][0](_[s+1]);_.length=0}}}function l(u){c(u(t))}function o(u,r=h){const s=[u,r];return i.add(s),i.size===1&&(e=n(c)||h),u(t),()=>{i.delete(s),i.size===0&&(e(),e=null)}}return{set:c,update:l,subscribe:o}}export{Dt as 
A,ut as B,I as C,Gt as D,Mt as E,at as F,X as G,_t as H,ht as I,dt as J,bt as K,h as L,jt as M,lt as N,tt as O,kt as P,wt as Q,Ct as R,Wt as S,b as T,ft as U,pt as V,Ot as W,M as X,W as Y,mt as Z,nt as a,$t as b,Et as c,Y as d,Z as e,Nt as f,yt as g,it as h,It as i,At as j,gt as k,xt as l,St as m,zt as n,Lt as o,Bt as p,ct as q,Tt as r,G as s,v as t,qt as u,vt as v,Ft as w,Ht as x,ot as y,Pt as z};
474
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/chunks/Markdown-hf-doc-builder.js
import{D as Je,S as q,i as Y,s as G,O as d,P as p,a as c,d as o,b as a,g as J,G as h,L as H,t as R,h as Z,e as j,w as U,k as X,c as B,x as ae,m as Q,y as ee,Q as Le,q as $,o as C,B as te,n as le,p as se,U as Oe,v as Pe,V as Me,W as Ve,X as Fe,F as ve,H as Ee,I as ke,J as $e}from"./vendor-hf-doc-builder.js";var D=(n=>(n.OPEN="OPEN",n.CLOSED="CLOSED",n.HASHASHLINK="HASHASHLINK",n))(D||{});const Ne={};function Re(n){return Ne[n]||(Ne[n]=Je("OPEN")),Ne[n]}function Ze(n){let e,s,l,t,r,i;return{c(){e=d("svg"),s=d("defs"),l=d("clipPath"),t=d("rect"),r=d("g"),i=d("path"),this.h()},l(u){e=p(u,"svg",{class:!0,xmlns:!0,"xmlns:xlink":!0,"aria-hidden":!0,focusable:!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0});var f=c(e);s=p(f,"defs",{});var g=c(s);l=p(g,"clipPath",{id:!0});var v=c(l);t=p(v,"rect",{x:!0,y:!0,width:!0,height:!0,fill:!0}),c(t).forEach(o),v.forEach(o),g.forEach(o),r=p(f,"g",{"clip-path":!0});var m=c(r);i=p(m,"path",{d:!0,fill:!0}),c(i).forEach(o),m.forEach(o),f.forEach(o),this.h()},h(){a(t,"x","3.05"),a(t,"y","0.5"),a(t,"width","25.73"),a(t,"height","31"),a(t,"fill","none"),a(l,"id","a"),a(i,"d","M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z"),a(i,"fill","#ee4c2c"),a(r,"clip-path","url(#a)"),a(e,"class",n[0]),a(e,"xmlns","http://www.w3.org/2000/svg"),a(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),a(e,"aria-hidden","true"),a(e,"focusable","false"),a(e,"role","img"),a(e,"width","1em"),a(e,"height","1em"),a(e,"preserveAspectRatio","xMidYMid meet"),a(e,"viewBox","0 0 32 32")},m(u,f){J(u,e,f),h(e,s),h(s,l),h(l,t),h(e,r),h(r,i)},p(u,[f]){f&1&&a(e,"class",u[0])},i:H,o:H,d(u){u&&o(e)}}}function qe(n,e,s){let{classNames:l=""}=e;return n.$$set=t=>{"classNames"in t&&s(0,l=t.classNames)},[l]}class Ye extends q{constructor(e){super();Y(this,e,qe,Ze,G,{classNames:0})}}function Ge(n){let 
e,s,l,t;return{c(){e=d("svg"),s=d("path"),l=d("path"),t=d("path"),this.h()},l(r){e=p(r,"svg",{class:!0,xmlns:!0,"xmlns:xlink":!0,"aria-hidden":!0,focusable:!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0});var i=c(e);s=p(i,"path",{d:!0,fill:!0}),c(s).forEach(o),l=p(i,"path",{d:!0,fill:!0}),c(l).forEach(o),t=p(i,"path",{d:!0,fill:!0}),c(t).forEach(o),i.forEach(o),this.h()},h(){a(s,"d","M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z"),a(s,"fill","#E55B2D"),a(l,"d","M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z"),a(l,"fill","#ED8E24"),a(t,"d","M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z"),a(t,"fill","#F8BF3C"),a(e,"class",n[0]),a(e,"xmlns","http://www.w3.org/2000/svg"),a(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),a(e,"aria-hidden","true"),a(e,"focusable","false"),a(e,"role","img"),a(e,"width","0.94em"),a(e,"height","1em"),a(e,"preserveAspectRatio","xMidYMid meet"),a(e,"viewBox","0 0 256 274")},m(r,i){J(r,e,i),h(e,s),h(e,l),h(e,t)},p(r,[i]){i&1&&a(e,"class",r[0])},i:H,o:H,d(r){r&&o(e)}}}function Te(n,e,s){let{classNames:l=""}=e;return n.$$set=t=>{"classNames"in t&&s(0,l=t.classNames)},[l]}class We extends q{constructor(e){super();Y(this,e,Te,Ge,G,{classNames:0})}}function Xe(n){let e,s,l,t,r,i,u,f,g,v,m,k,E,N,x,A,w,z,S,y,_,b,I,M,V,re,ne,oe,ie,he,ce,T,K,fe,ue,de,pe,O,me,ge,_e;return{c(){e=d("svg"),s=d("style"),l=R(`.J { stroke: #dce0df; } .K { stroke-linejoin: round; } 
`),t=d("g"),r=d("path"),i=d("path"),u=d("path"),f=d("path"),g=d("path"),v=d("path"),m=d("path"),k=d("path"),E=d("g"),N=d("path"),x=d("path"),A=d("path"),w=d("g"),z=d("path"),S=d("path"),y=d("path"),_=d("g"),b=d("path"),I=d("path"),M=d("g"),V=d("path"),re=d("path"),ne=d("path"),oe=d("path"),ie=d("path"),he=d("path"),ce=d("path"),T=d("path"),K=d("g"),fe=d("path"),ue=d("path"),de=d("path"),pe=d("path"),O=d("g"),me=d("path"),ge=d("path"),_e=d("path"),this.h()},l(W){e=p(W,"svg",{class:!0,xmlns:!0,"xmlns:xlink":!0,"aria-hidden":!0,focusable:!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0});var L=c(e);s=p(L,"style",{});var Ie=c(s);l=Z(Ie,`.J { stroke: #dce0df; } .K { stroke-linejoin: round; } `),Ie.forEach(o),t=p(L,"g",{fill:!0,class:!0});var P=c(t);r=p(P,"path",{d:!0}),c(r).forEach(o),i=p(P,"path",{d:!0}),c(i).forEach(o),u=p(P,"path",{d:!0}),c(u).forEach(o),f=p(P,"path",{d:!0}),c(f).forEach(o),g=p(P,"path",{d:!0}),c(g).forEach(o),v=p(P,"path",{d:!0}),c(v).forEach(o),m=p(P,"path",{d:!0}),c(m).forEach(o),k=p(P,"path",{d:!0}),c(k).forEach(o),P.forEach(o),E=p(L,"g",{fill:!0,class:!0});var ze=c(E);N=p(ze,"path",{d:!0}),c(N).forEach(o),x=p(ze,"path",{d:!0}),c(x).forEach(o),A=p(ze,"path",{d:!0}),c(A).forEach(o),ze.forEach(o),w=p(L,"g",{fill:!0,class:!0});var be=c(w);z=p(be,"path",{d:!0}),c(z).forEach(o),S=p(be,"path",{d:!0}),c(S).forEach(o),be.forEach(o),y=p(L,"path",{d:!0,fill:!0,class:!0}),c(y).forEach(o),_=p(L,"g",{fill:!0,class:!0});var Se=c(_);b=p(Se,"path",{d:!0}),c(b).forEach(o),I=p(Se,"path",{d:!0}),c(I).forEach(o),Se.forEach(o),M=p(L,"g",{fill:!0,class:!0});var F=c(M);V=p(F,"path",{d:!0}),c(V).forEach(o),re=p(F,"path",{d:!0}),c(re).forEach(o),ne=p(F,"path",{d:!0}),c(ne).forEach(o),oe=p(F,"path",{d:!0}),c(oe).forEach(o),ie=p(F,"path",{d:!0}),c(ie).forEach(o),he=p(F,"path",{d:!0}),c(he).forEach(o),ce=p(F,"path",{d:!0}),c(ce).forEach(o),F.forEach(o),T=p(L,"path",{d:!0,fill:!0,class:!0}),c(T).forEach(o),K=p(L,"g",{fill:!0,class:!0});var 
we=c(K);fe=p(we,"path",{d:!0}),c(fe).forEach(o),ue=p(we,"path",{d:!0}),c(ue).forEach(o),de=p(we,"path",{d:!0}),c(de).forEach(o),pe=p(we,"path",{d:!0}),c(pe).forEach(o),we.forEach(o),O=p(L,"g",{fill:!0,class:!0});var Ce=c(O);me=p(Ce,"path",{d:!0}),c(me).forEach(o),ge=p(Ce,"path",{d:!0}),c(ge).forEach(o),_e=p(Ce,"path",{d:!0}),c(_e).forEach(o),Ce.forEach(o),L.forEach(o),this.h()},h(){a(r,"d","M50.5 130.4l-25 43.31h50l25-43.31h-50z"),a(i,"d","M.5 217.01l25-43.3h50l-25 43.3H.5z"),a(u,"d","M125.5 173.71h-50l-25 43.3h50l25-43.3z"),a(f,"d","M175.5 173.71h-50l-25 43.3h50l25-43.3z"),a(g,"d","M150.5 130.4l-25 43.31h50l25-43.31h-50z"),a(v,"d","M175.5 87.1l-25 43.3h50l25-43.3h-50z"),a(m,"d","M200.5 43.8l-25 43.3h50l25-43.3h-50z"),a(k,"d","M225.5.5l-25 43.3h50l25-43.3h-50z"),a(t,"fill","#5e97f6"),a(t,"class","J K"),a(N,"d","M.5 217.01l25 43.3h50l-25-43.3H.5z"),a(x,"d","M125.5 260.31h-50l-25-43.3h50l25 43.3z"),a(A,"d","M175.5 260.31h-50l-25-43.3h50l25 43.3z"),a(E,"fill","#2a56c6"),a(E,"class","J K"),a(z,"d","M200.5 217.01l-25-43.3-25 43.3 25 43.3 25-43.3zm50-86.61l-25-43.3-25 43.3h50z"),a(S,"d","M250.5 43.8l-25 43.3 25 43.3 25-43.3-25-43.3z"),a(w,"fill","#00796b"),a(w,"class","J K"),a(y,"d","M125.5 173.71l-25-43.31-25 43.31h50z"),a(y,"fill","#3367d6"),a(y,"class","J K"),a(b,"d","M250.5 130.4h-50l-25 43.31h50l25-43.31z"),a(I,"d","M300.5 130.4h-50l-25 43.31h50l25-43.31z"),a(_,"fill","#26a69a"),a(_,"class","J K"),a(V,"d","M350.5 43.8L325.5.5l-25 43.3 25 43.3 25-43.3z"),a(re,"d","M375.5 87.1l-25-43.3-25 43.3 25 43.3 25-43.3z"),a(ne,"d","M400.5 130.4l-25-43.3-25 43.3 25 43.31 25-43.31z"),a(oe,"d","M425.5 173.71l-25-43.31-25 43.31 25 43.3 25-43.3z"),a(ie,"d","M450.5 217.01l-25-43.3-25 43.3 25 43.3 25-43.3zM425.5.5l-25 43.3 25 43.3 25-43.3-25-43.3z"),a(he,"d","M375.5 87.1l25-43.3 25 43.3-25 43.3-25-43.3zm-25 43.3l-25 43.31 25 43.3 25-43.3-25-43.31z"),a(ce,"d","M325.5 260.31l-25-43.3 25-43.3 25 43.3-25 43.3z"),a(M,"fill","#9c27b0"),a(M,"class","J K"),a(T,"d","M275.5 
260.31l-25-43.3h50l25 43.3h-50z"),a(T,"fill","#6a1b9a"),a(T,"class","J K"),a(fe,"d","M225.5 173.71h-50l25 43.3h50l-25-43.3z"),a(ue,"d","M275.5 173.71h-50l25 43.3 25-43.3zm0-86.61l25 43.3h50l-25-43.3h-50z"),a(de,"d","M300.5 43.8h-50l25 43.3h50l-25-43.3zm125 216.51l-25-43.3h-50l25 43.3h50z"),a(pe,"d","M375.5 173.71l-25 43.3h50l-25-43.3z"),a(K,"fill","#00695c"),a(K,"class","J K"),a(me,"d","M325.5.5h-50l-25 43.3h50l25-43.3zm0 173.21h-50l-25 43.3h50l25-43.3z"),a(ge,"d","M350.5 130.4h-50l-25 43.31h50l25-43.31zM425.5.5h-50l-25 43.3h50l25-43.3z"),a(_e,"d","M375.5 87.1l-25-43.3h50l-25 43.3z"),a(O,"fill","#ea80fc"),a(O,"class","J K"),a(e,"class",n[0]),a(e,"xmlns","http://www.w3.org/2000/svg"),a(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),a(e,"aria-hidden","true"),a(e,"focusable","false"),a(e,"role","img"),a(e,"width","1.73em"),a(e,"height","1em"),a(e,"preserveAspectRatio","xMidYMid meet"),a(e,"viewBox","0 0 451 260.81")},m(W,L){J(W,e,L),h(e,s),h(s,l),h(e,t),h(t,r),h(t,i),h(t,u),h(t,f),h(t,g),h(t,v),h(t,m),h(t,k),h(e,E),h(E,N),h(E,x),h(E,A),h(e,w),h(w,z),h(w,S),h(e,y),h(e,_),h(_,b),h(_,I),h(e,M),h(M,V),h(M,re),h(M,ne),h(M,oe),h(M,ie),h(M,he),h(M,ce),h(e,T),h(e,K),h(K,fe),h(K,ue),h(K,de),h(K,pe),h(e,O),h(O,me),h(O,ge),h(O,_e)},p(W,[L]){L&1&&a(e,"class",W[0])},i:H,o:H,d(W){W&&o(e)}}}function Qe(n,e,s){let{classNames:l=""}=e;return n.$$set=t=>{"classNames"in t&&s(0,l=t.classNames)},[l]}class Ue extends q{constructor(e){super();Y(this,e,Qe,Xe,G,{classNames:0})}}function et(n){let e,s;return{c(){e=d("svg"),s=d("path"),this.h()},l(l){e=p(l,"svg",{class:!0,width:!0,height:!0,viewBox:!0,fill:!0,xmlns:!0});var t=c(e);s=p(t,"path",{d:!0,fill:!0}),c(s).forEach(o),t.forEach(o),this.h()},h(){a(s,"d","M0 4.50001C0.390979 2.37042 2.25728 0.756592 4.5 0.756592C6.74272 0.756592 8.60861 2.37042 9 4.50001C8.60902 6.62959 6.74272 8.24342 4.5 8.24342C2.25728 8.24342 0.391395 6.62959 0 4.50001ZM4.5 6.57968C5.05156 6.57968 5.58054 6.36057 5.97055 5.97056C6.36057 5.58054 6.57967 5.05157 6.57967 
4.50001C6.57967 3.94844 6.36057 3.41947 5.97055 3.02945C5.58054 2.63944 5.05156 2.42033 4.5 2.42033C3.94844 2.42033 3.41946 2.63944 3.02945 3.02945C2.63943 3.41947 2.42033 3.94844 2.42033 4.50001C2.42033 5.05157 2.63943 5.58054 3.02945 5.97056C3.41946 6.36057 3.94844 6.57968 4.5 6.57968ZM4.5 5.74781C4.16906 5.74781 3.85168 5.61635 3.61767 5.38234C3.38366 5.14833 3.2522 4.83094 3.2522 4.50001C3.2522 4.16907 3.38366 3.85168 3.61767 3.61767C3.85168 3.38367 4.16906 3.2522 4.5 3.2522C4.83094 3.2522 5.14832 3.38367 5.38233 3.61767C5.61634 3.85168 5.7478 4.16907 5.7478 4.50001C5.7478 4.83094 5.61634 5.14833 5.38233 5.38234C5.14832 5.61635 4.83094 5.74781 4.5 5.74781Z"),a(s,"fill","currentColor"),a(e,"class",n[0]),a(e,"width",n[1]),a(e,"height",n[1]),a(e,"viewBox","0 0 9 9"),a(e,"fill","currentColor"),a(e,"xmlns","http://www.w3.org/2000/svg")},m(l,t){J(l,e,t),h(e,s)},p(l,[t]){t&1&&a(e,"class",l[0]),t&2&&a(e,"width",l[1]),t&2&&a(e,"height",l[1])},i:H,o:H,d(l){l&&o(e)}}}function tt(n,e,s){let{classNames:l=""}=e,{size:t="1em"}=e;return n.$$set=r=>{"classNames"in r&&s(0,l=r.classNames),"size"in r&&s(1,t=r.size)},[l,t]}class lt extends q{constructor(e){super();Y(this,e,tt,et,G,{classNames:0,size:1})}}function st(n){let e,s;return{c(){e=d("svg"),s=d("path"),this.h()},l(l){e=p(l,"svg",{class:!0,width:!0,height:!0,viewBox:!0,fill:!0,xmlns:!0});var t=c(e);s=p(t,"path",{d:!0,fill:!0}),c(s).forEach(o),t.forEach(o),this.h()},h(){a(s,"d","M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 
3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z"),a(s,"fill","currentColor"),a(e,"class",n[0]),a(e,"width",n[1]),a(e,"height",n[1]),a(e,"viewBox","0 0 10 9"),a(e,"fill","currentColor"),a(e,"xmlns","http://www.w3.org/2000/svg")},m(l,t){J(l,e,t),h(e,s)},p(l,[t]){t&1&&a(e,"class",l[0]),t&2&&a(e,"width",l[1]),t&2&&a(e,"height",l[1])},i:H,o:H,d(l){l&&o(e)}}}function at(n,e,s){let{classNames:l=""}=e,{size:t="1em"}=e;return n.$$set=r=>{"classNames"in r&&s(0,l=r.classNames),"size"in r&&s(1,t=r.size)},[l,t]}class rt extends q{constructor(e){super();Y(this,e,at,st,G,{classNames:0,size:1})}}const{window:nt}=Ve;function ye(n){let e,s,l,t,r,i,u,f,g,v;return s=new rt({props:{size:"0.9em"}}),{c(){e=j("div"),U(s.$$.fragment),l=X(),t=j("span"),r=R("Hide "),i=R(n[3]),u=R(" content"),this.h()},l(m){e=B(m,"DIV",{class:!0});var k=c(e);ae(s.$$.fragment,k),l=Q(k),t=B(k,"SPAN",{});var E=c(t);r=Z(E,"Hide "),i=Z(E,n[3]),u=Z(E," content"),E.forEach(o),k.forEach(o),this.h()},h(){a(e,"class","cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none")},m(m,k){J(m,e,k),ee(s,e,null),h(e,l),h(e,t),h(t,r),h(t,i),h(t,u),f=!0,g||(v=Le(e,"click",n[5]),g=!0)},p:H,i(m){f||($(s.$$.fragment,m),f=!0)},o(m){C(s.$$.fragment,m),f=!1},d(m){m&&o(e),te(s),g=!1,v()}}}function ot(n){let e,s;const l=n[10].default,t=ve(l,n,n[9],null);return{c(){e=j("div"),t&&t.c(),this.h()},l(r){e=B(r,"DIV",{class:!0});var 
i=c(e);t&&t.l(i),i.forEach(o),this.h()},h(){a(e,"class","framework-content")},m(r,i){J(r,e,i),t&&t.m(e,null),s=!0},p(r,i){t&&t.p&&(!s||i&512)&&Ee(t,l,r,r[9],s?$e(l,r[9],i,null):ke(r[9]),null)},i(r){s||($(t,r),s=!0)},o(r){C(t,r),s=!1},d(r){r&&o(e),t&&t.d(r)}}}function it(n){let e,s,l,t,r,i,u,f,g,v;return s=new lt({props:{size:"0.9em"}}),{c(){e=j("div"),U(s.$$.fragment),l=X(),t=j("span"),r=R("Show "),i=R(n[3]),u=R(" content"),this.h()},l(m){e=B(m,"DIV",{class:!0});var k=c(e);ae(s.$$.fragment,k),l=Q(k),t=B(k,"SPAN",{});var E=c(t);r=Z(E,"Show "),i=Z(E,n[3]),u=Z(E," content"),E.forEach(o),k.forEach(o),this.h()},h(){a(e,"class","cursor-pointer mt-[-12.5px] flex items-center justify-center space-x-1 py-4 text-sm hover:underline leading-none")},m(m,k){J(m,e,k),ee(s,e,null),h(e,l),h(e,t),h(t,r),h(t,i),h(t,u),f=!0,g||(v=Le(e,"click",n[5]),g=!0)},p:H,i(m){f||($(s.$$.fragment,m),f=!0)},o(m){C(s.$$.fragment,m),f=!1},d(m){m&&o(e),te(s),g=!1,v()}}}function ht(n){let e,s,l,t,r,i,u,f,g,v,m,k,E,N;var x=n[2];function A(_){return{}}x&&(t=new x(A()));let w=!n[1]&&ye(n);const z=[it,ot],S=[];function y(_,b){return _[1]?0:1}return v=y(n),m=S[v]=z[v](n),{c(){e=j("div"),s=j("div"),l=j("div"),t&&U(t.$$.fragment),r=X(),i=j("span"),u=R(n[3]),f=X(),w&&w.c(),g=X(),m.c(),this.h()},l(_){e=B(_,"DIV",{class:!0});var b=c(e);s=B(b,"DIV",{class:!0});var I=c(s);l=B(I,"DIV",{class:!0});var M=c(l);t&&ae(t.$$.fragment,M),r=Q(M),i=B(M,"SPAN",{});var V=c(i);u=Z(V,n[3]),V.forEach(o),M.forEach(o),f=Q(I),w&&w.l(I),I.forEach(o),g=Q(b),m.l(b),b.forEach(o),this.h()},h(){a(l,"class","flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"),a(s,"class","flex h-[22px] mt-[-12.5px] justify-between leading-none"),a(e,"class","border border-gray-200 rounded-xl px-4 relative")},m(_,b){J(_,e,b),h(e,s),h(s,l),t&&ee(t,l,null),h(l,r),h(l,i),h(i,u),h(s,f),w&&w.m(s,null),h(e,g),S[v].m(e,null),n[11](e),k=!0,E||(N=Le(nt,"hashchange",n[6]),E=!0)},p(_,[b]){if(x!==(x=_[2])){if(t){le();const 
M=t;C(M.$$.fragment,1,0,()=>{te(M,1)}),se()}x?(t=new x(A()),U(t.$$.fragment),$(t.$$.fragment,1),ee(t,l,r)):t=null}_[1]?w&&(le(),C(w,1,1,()=>{w=null}),se()):w?(w.p(_,b),b&2&&$(w,1)):(w=ye(_),w.c(),$(w,1),w.m(s,null));let I=v;v=y(_),v===I?S[v].p(_,b):(le(),C(S[I],1,1,()=>{S[I]=null}),se(),m=S[v],m?m.p(_,b):(m=S[v]=z[v](_),m.c()),$(m,1),m.m(e,null))},i(_){k||(t&&$(t.$$.fragment,_),$(w),$(m),k=!0)},o(_){t&&C(t.$$.fragment,_),C(w),C(m),k=!1},d(_){_&&o(e),t&&te(t),w&&w.d(),S[v].d(),n[11](null),E=!1,N()}}}function ct(n,e,s){let l,t,{$$slots:r={},$$scope:i}=e,{framework:u}=e,f,g=new Set;const v={pytorch:{Icon:Ye,label:"Pytorch"},tensorflow:{Icon:We,label:"TensorFlow"},jax:{Icon:Ue,label:"JAX"}},{Icon:m,label:k}=v[u],E=`hf_doc_framework_${u}_is_hidden`,N=Re(u);Oe(n,N,z=>s(8,t=z));function x(){Me(N,t=t!==D.CLOSED?D.CLOSED:D.OPEN,t),localStorage.setItem(E,t)}function A(){const z=window.location.hash.slice(1);g.has(z)&&(Me(N,t=D.HASHASHLINK,t),localStorage.setItem(E,t))}Pe(()=>{const z=window.location.hash.slice(1),S="header-link",y=f.querySelectorAll(`.${S}`);g=new Set([...y].map(b=>b.id));const _=localStorage.getItem(E);g.has(z)?Me(N,t=D.HASHASHLINK,t):_===D.CLOSED&&t!==D.HASHASHLINK&&Me(N,t=D.CLOSED,t)});function w(z){Fe[z?"unshift":"push"](()=>{f=z,s(0,f)})}return n.$$set=z=>{"framework"in z&&s(7,u=z.framework),"$$scope"in z&&s(9,i=z.$$scope)},n.$$.update=()=>{n.$$.dirty&256&&s(1,l=t===D.CLOSED)},[f,l,m,k,N,x,A,u,t,i,r,w]}class xe extends q{constructor(e){super();Y(this,e,ct,ht,G,{framework:7})}}const ft=n=>({}),He=n=>({}),ut=n=>({}),Ae=n=>({}),dt=n=>({}),Ke=n=>({});function De(n){let e,s;return e=new xe({props:{framework:"pytorch",$$slots:{default:[pt]},$$scope:{ctx:n}}}),{c(){U(e.$$.fragment)},l(l){ae(e.$$.fragment,l)},m(l,t){ee(e,l,t),s=!0},p(l,t){const r={};t&16&&(r.$$scope={dirty:t,ctx:l}),e.$set(r)},i(l){s||($(e.$$.fragment,l),s=!0)},o(l){C(e.$$.fragment,l),s=!1},d(l){te(e,l)}}}function pt(n){let e;const 
s=n[3].pytorch,l=ve(s,n,n[4],Ke);return{c(){l&&l.c()},l(t){l&&l.l(t)},m(t,r){l&&l.m(t,r),e=!0},p(t,r){l&&l.p&&(!e||r&16)&&Ee(l,s,t,t[4],e?$e(s,t[4],r,dt):ke(t[4]),Ke)},i(t){e||($(l,t),e=!0)},o(t){C(l,t),e=!1},d(t){l&&l.d(t)}}}function je(n){let e,s;return e=new xe({props:{framework:"tensorflow",$$slots:{default:[mt]},$$scope:{ctx:n}}}),{c(){U(e.$$.fragment)},l(l){ae(e.$$.fragment,l)},m(l,t){ee(e,l,t),s=!0},p(l,t){const r={};t&16&&(r.$$scope={dirty:t,ctx:l}),e.$set(r)},i(l){s||($(e.$$.fragment,l),s=!0)},o(l){C(e.$$.fragment,l),s=!1},d(l){te(e,l)}}}function mt(n){let e;const s=n[3].tensorflow,l=ve(s,n,n[4],Ae);return{c(){l&&l.c()},l(t){l&&l.l(t)},m(t,r){l&&l.m(t,r),e=!0},p(t,r){l&&l.p&&(!e||r&16)&&Ee(l,s,t,t[4],e?$e(s,t[4],r,ut):ke(t[4]),Ae)},i(t){e||($(l,t),e=!0)},o(t){C(l,t),e=!1},d(t){l&&l.d(t)}}}function Be(n){let e,s;return e=new xe({props:{framework:"jax",$$slots:{default:[gt]},$$scope:{ctx:n}}}),{c(){U(e.$$.fragment)},l(l){ae(e.$$.fragment,l)},m(l,t){ee(e,l,t),s=!0},p(l,t){const r={};t&16&&(r.$$scope={dirty:t,ctx:l}),e.$set(r)},i(l){s||($(e.$$.fragment,l),s=!0)},o(l){C(e.$$.fragment,l),s=!1},d(l){te(e,l)}}}function gt(n){let e;const s=n[3].jax,l=ve(s,n,n[4],He);return{c(){l&&l.c()},l(t){l&&l.l(t)},m(t,r){l&&l.m(t,r),e=!0},p(t,r){l&&l.p&&(!e||r&16)&&Ee(l,s,t,t[4],e?$e(s,t[4],r,ft):ke(t[4]),He)},i(t){e||($(l,t),e=!0)},o(t){C(l,t),e=!1},d(t){l&&l.d(t)}}}function _t(n){let e,s,l,t,r=n[0]&&De(n),i=n[1]&&je(n),u=n[2]&&Be(n);return{c(){e=j("div"),r&&r.c(),s=X(),i&&i.c(),l=X(),u&&u.c(),this.h()},l(f){e=B(f,"DIV",{class:!0});var g=c(e);r&&r.l(g),s=Q(g),i&&i.l(g),l=Q(g),u&&u.l(g),g.forEach(o),this.h()},h(){a(e,"class","space-y-10 py-6 2xl:py-8 
2xl:-mx-4")},m(f,g){J(f,e,g),r&&r.m(e,null),h(e,s),i&&i.m(e,null),h(e,l),u&&u.m(e,null),t=!0},p(f,[g]){f[0]?r?(r.p(f,g),g&1&&$(r,1)):(r=De(f),r.c(),$(r,1),r.m(e,s)):r&&(le(),C(r,1,1,()=>{r=null}),se()),f[1]?i?(i.p(f,g),g&2&&$(i,1)):(i=je(f),i.c(),$(i,1),i.m(e,l)):i&&(le(),C(i,1,1,()=>{i=null}),se()),f[2]?u?(u.p(f,g),g&4&&$(u,1)):(u=Be(f),u.c(),$(u,1),u.m(e,null)):u&&(le(),C(u,1,1,()=>{u=null}),se())},i(f){t||($(r),$(i),$(u),t=!0)},o(f){C(r),C(i),C(u),t=!1},d(f){f&&o(e),r&&r.d(),i&&i.d(),u&&u.d()}}}function wt(n,e,s){let{$$slots:l={},$$scope:t}=e,{pytorch:r=!1}=e,{tensorflow:i=!1}=e,{jax:u=!1}=e;return n.$$set=f=>{"pytorch"in f&&s(0,r=f.pytorch),"tensorflow"in f&&s(1,i=f.tensorflow),"jax"in f&&s(2,u=f.jax),"$$scope"in f&&s(4,t=f.$$scope)},[r,i,u,l,t]}class $t extends q{constructor(e){super();Y(this,e,wt,_t,G,{pytorch:0,tensorflow:1,jax:2})}}function vt(n){let e;const s=n[1].default,l=ve(s,n,n[0],null);return{c(){l&&l.c()},l(t){l&&l.l(t)},m(t,r){l&&l.m(t,r),e=!0},p(t,[r]){l&&l.p&&(!e||r&1)&&Ee(l,s,t,t[0],e?$e(s,t[0],r,null):ke(t[0]),null)},i(t){e||($(l,t),e=!0)},o(t){C(l,t),e=!1},d(t){l&&l.d(t)}}}function Et(n,e,s){let{$$slots:l={},$$scope:t}=e;return n.$$set=r=>{"$$scope"in r&&s(0,t=r.$$scope)},[t,l]}class zt extends q{constructor(e){super();Y(this,e,Et,vt,G,{})}}export{$t as F,zt as M};
475
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/chunks/CodeBlock-hf-doc-builder.js
import{S as C,i as E,s as x,O as y,P as w,a as d,d as h,b as u,g as T,G as g,L as N,e as b,k as I,t as R,c as p,m as A,h as L,f as O,j as S,w as V,x as H,y as k,Q as v,q as M,o as B,B as D,R as q,T as P}from"./vendor-hf-doc-builder.js";function Z(a){const e=document.createElement("textarea");document.body.appendChild(e),e.value=a,e.select(),document.execCommand("copy"),document.body.removeChild(e)}function j(a){let e,t,l,n;return{c(){e=y("svg"),t=y("path"),l=y("path"),n=y("rect"),this.h()},l(r){e=w(r,"svg",{class:!0,xmlns:!0,"aria-hidden":!0,fill:!0,focusable:!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0});var s=d(e);t=w(s,"path",{d:!0,transform:!0}),d(t).forEach(h),l=w(s,"path",{d:!0,transform:!0}),d(l).forEach(h),n=w(s,"rect",{fill:!0,width:!0,height:!0}),d(n).forEach(h),s.forEach(h),this.h()},h(){u(t,"d","M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z"),u(t,"transform","translate(0)"),u(l,"d","M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z"),u(l,"transform","translate(0)"),u(n,"fill","none"),u(n,"width","32"),u(n,"height","32"),u(e,"class",a[0]),u(e,"xmlns","http://www.w3.org/2000/svg"),u(e,"aria-hidden","true"),u(e,"fill","currentColor"),u(e,"focusable","false"),u(e,"role","img"),u(e,"width","1em"),u(e,"height","1em"),u(e,"preserveAspectRatio","xMidYMid meet"),u(e,"viewBox","0 0 32 32")},m(r,s){T(r,e,s),g(e,t),g(e,l),g(e,n)},p(r,[s]){s&1&&u(e,"class",r[0])},i:N,o:N,d(r){r&&h(e)}}}function G(a,e,t){let{classNames:l=""}=e;return a.$$set=n=>{"classNames"in n&&t(0,l=n.classNames)},[l]}class Q extends C{constructor(e){super();E(this,e,G,j,x,{classNames:0})}}function U(a){let e,t,l,n,r;return{c(){e=b("div"),t=b("div"),l=I(),n=R(a[1]),this.h()},l(s){e=p(s,"DIV",{class:!0});var c=d(e);t=p(c,"DIV",{class:!0,style:!0}),d(t).forEach(h),l=A(c),n=L(c,a[1]),c.forEach(h),this.h()},h(){u(t,"class","absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 
border-t-0"),O(t,"border-left-color","transparent"),O(t,"border-right-color","transparent"),u(e,"class",r="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow "+a[2]+" "+a[0])},m(s,c){T(s,e,c),g(e,t),g(e,l),g(e,n)},p(s,[c]){c&2&&S(n,s[1]),c&5&&r!==(r="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow "+s[2]+" "+s[0])&&u(e,"class",r)},i:N,o:N,d(s){s&&h(e)}}}function Y(a,e,t){let{classNames:l=""}=e,{label:n="Copied"}=e,{position:r="left-1/2 top-full transform -translate-x-1/2 translate-y-2"}=e;return a.$$set=s=>{"classNames"in s&&t(0,l=s.classNames),"label"in s&&t(1,n=s.label),"position"in s&&t(2,r=s.position)},[l,n,r]}class z extends C{constructor(e){super();E(this,e,Y,U,x,{classNames:0,label:1,position:2})}}function F(a){let e,t,l,n,r,s,c,m,f;return t=new Q({}),n=new z({props:{classNames:a[4]?"opacity-100":"opacity-0"}}),{c(){e=b("button"),V(t.$$.fragment),l=I(),V(n.$$.fragment),this.h()},l(o){e=p(o,"BUTTON",{class:!0,title:!0,type:!0});var i=d(e);H(t.$$.fragment,i),l=A(i),H(n.$$.fragment,i),i.forEach(h),this.h()},h(){u(e,"class",r="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none "+a[0]+" "+(a[2]==="text"?"mx-0.5":"")+" "+(a[2]==="button"?"btn":"")+" "+(a[2]==="button-clear"?"py-1 px-2 border rounded-lg shadow-sm":"")+" "+(!a[4]&&["button-clear","text"].includes(a[2])?"text-gray-600":"")+" "+(a[4]?"text-green-500":"")),u(e,"title",s=a[3]||a[1]||"Copy to clipboard"),u(e,"type","button")},m(o,i){T(o,e,i),k(t,e,null),g(e,l),k(n,e,null),c=!0,m||(f=v(e,"click",a[5]),m=!0)},p(o,[i]){const _={};i&16&&(_.classNames=o[4]?"opacity-100":"opacity-0"),n.$set(_),(!c||i&21&&r!==(r="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none "+o[0]+" "+(o[2]==="text"?"mx-0.5":"")+" "+(o[2]==="button"?"btn":"")+" "+(o[2]==="button-clear"?"py-1 px-2 border 
rounded-lg shadow-sm":"")+" "+(!o[4]&&["button-clear","text"].includes(o[2])?"text-gray-600":"")+" "+(o[4]?"text-green-500":"")))&&u(e,"class",r),(!c||i&10&&s!==(s=o[3]||o[1]||"Copy to clipboard"))&&u(e,"title",s)},i(o){c||(M(t.$$.fragment,o),M(n.$$.fragment,o),c=!0)},o(o){B(t.$$.fragment,o),B(n.$$.fragment,o),c=!1},d(o){o&&h(e),D(t),D(n),m=!1,f()}}}function J(a,e,t){let{classNames:l=""}=e,{label:n=""}=e,{style:r="text"}=e,{title:s=""}=e,{value:c}=e,m=!1,f;q(()=>{f&&clearTimeout(f)});function o(){Z(c),t(4,m=!0),f&&clearTimeout(f),f=setTimeout(()=>{t(4,m=!1)},1e3)}return a.$$set=i=>{"classNames"in i&&t(0,l=i.classNames),"label"in i&&t(1,n=i.label),"style"in i&&t(2,r=i.style),"title"in i&&t(3,s=i.title),"value"in i&&t(6,c=i.value)},[l,n,r,s,m,o,c]}class K extends C{constructor(e){super();E(this,e,J,F,x,{classNames:0,label:1,style:2,title:3,value:6})}}function W(a){let e,t,l,n,r,s,c,m;return l=new K({props:{classNames:"transition duration-200 ease-in-out "+(a[2]&&"opacity-0"),label:"code excerpt",value:a[0]}}),{c(){e=b("div"),t=b("div"),V(l.$$.fragment),n=I(),r=b("pre"),this.h()},l(f){e=p(f,"DIV",{class:!0});var o=d(e);t=p(o,"DIV",{class:!0});var i=d(t);H(l.$$.fragment,i),i.forEach(h),n=A(o),r=p(o,"PRE",{});var _=d(r);_.forEach(h),o.forEach(h),this.h()},h(){u(t,"class","absolute top-2.5 right-4"),u(e,"class","code-block relative")},m(f,o){T(f,e,o),g(e,t),k(l,t,null),g(e,n),g(e,r),r.innerHTML=a[1],s=!0,c||(m=[v(e,"mouseover",a[3]),v(e,"focus",a[3]),v(e,"mouseout",a[4]),v(e,"blur",a[4])],c=!0)},p(f,[o]){const i={};o&4&&(i.classNames="transition duration-200 ease-in-out "+(f[2]&&"opacity-0")),o&1&&(i.value=f[0]),l.$set(i),(!s||o&2)&&(r.innerHTML=f[1])},i(f){s||(M(l.$$.fragment,f),s=!0)},o(f){B(l.$$.fragment,f),s=!1},d(f){f&&h(e),D(l),c=!1,P(m)}}}function X(a,e,t){let l=!0,{code:n=""}=e,{highlighted:r=""}=e;function s(){t(2,l=!1)}function c(){t(2,l=!0)}return a.$$set=m=>{"code"in m&&t(0,n=m.code),"highlighted"in m&&t(1,r=m.highlighted)},[n,r,l,s,c]}class ee extends 
C{constructor(e){super();E(this,e,X,W,x,{code:0,highlighted:1})}}export{ee as C};
476
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js
let e="";function a(s){e=s.base,s.assets}export{e as b,a as s};
477
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js
import{S as Q,i as R,s as W,l as S,g as y,q as C,o as F,d as u,F as Y,e as v,t as D,k as $,c as b,a as k,h as V,m as j,b as h,G as m,H as z,I as B,J as M,K as X,j as O}from"../chunks/vendor-hf-doc-builder.js";import{b as P}from"../chunks/paths-hf-doc-builder.js";function T(n,t,r){const a=n.slice();return a[3]=t[r],a}function Z(n){let t,r,a,e,l,o,w,c,d,q,g,A,x,E=n[0],f=[];for(let s=0;s<E.length;s+=1)f[s]=U(T(n,E,s));const L=n[2].default,_=Y(L,n,n[1],null);return{c(){t=v("style"),r=D(`body, html { padding: 0; margin: 0; }`),a=$(),e=v("div"),l=v("div"),o=v("ul");for(let s=0;s<f.length;s+=1)f[s].c();w=$(),c=v("div"),d=v("div"),_&&_.c(),q=$(),g=v("div"),A=D("Sub side menu"),this.h()},l(s){t=b(s,"STYLE",{});var p=k(t);r=V(p,`body, html { padding: 0; margin: 0; }`),p.forEach(u),a=j(s),e=b(s,"DIV",{class:!0});var i=k(e);l=b(i,"DIV",{class:!0});var I=k(l);o=b(I,"UL",{class:!0});var G=k(o);for(let N=0;N<f.length;N+=1)f[N].l(G);G.forEach(u),I.forEach(u),w=j(i),c=b(i,"DIV",{class:!0});var H=k(c);d=b(H,"DIV",{class:!0});var J=k(d);_&&_.l(J),J.forEach(u),H.forEach(u),q=j(i),g=b(i,"DIV",{class:!0});var K=k(g);A=V(K,"Sub side menu"),K.forEach(u),i.forEach(u),this.h()},h(){h(o,"class","pt-2 flex flex-col pl-3 w-full"),h(l,"class","w-[270px] 2xl:w-[300px] hidden md:block border-r-2 shrink-0"),h(d,"class","prose prose-doc dark:prose-light max-w-4xl mx-auto break-words relative"),h(c,"class","px-4 pt-3 grow"),h(g,"class","w-[270px] 2xl:w-[305px] hidden lg:block border-l-2 shrink-0 opacity-50 p-4"),h(e,"class","flex")},m(s,p){y(s,t,p),m(t,r),y(s,a,p),y(s,e,p),m(e,l),m(l,o);for(let i=0;i<f.length;i+=1)f[i].m(o,null);m(e,w),m(e,c),m(c,d),_&&_.m(d,null),m(e,q),m(e,g),m(g,A),x=!0},p(s,p){if(p&1){E=s[0];let i;for(i=0;i<E.length;i+=1){const 
I=T(s,E,i);f[i]?f[i].p(I,p):(f[i]=U(I),f[i].c(),f[i].m(o,null))}for(;i<f.length;i+=1)f[i].d(1);f.length=E.length}_&&_.p&&(!x||p&2)&&z(_,L,s,s[1],x?M(L,s[1],p,null):B(s[1]),null)},i(s){x||(C(_,s),x=!0)},o(s){F(_,s),x=!1},d(s){s&&u(t),s&&u(a),s&&u(e),X(f,s),_&&_.d(s)}}}function ee(n){let t;const r=n[2].default,a=Y(r,n,n[1],null);return{c(){a&&a.c()},l(e){a&&a.l(e)},m(e,l){a&&a.m(e,l),t=!0},p(e,l){a&&a.p&&(!t||l&2)&&z(a,r,e,e[1],t?M(r,e[1],l,null):B(e[1]),null)},i(e){t||(C(a,e),t=!0)},o(e){F(a,e),t=!1},d(e){a&&a.d(e)}}}function te(n){let t,r=n[3].title+"",a;return{c(){t=v("span"),a=D(r),this.h()},l(e){t=b(e,"SPAN",{role:!0,class:!0});var l=k(t);a=V(l,r),l.forEach(u),this.h()},h(){h(t,"role","navigation"),h(t,"class","opacity-50 text-lg block text-gray-500 pr-2 hover:text-black dark:hover:text-gray-300 py-1")},m(e,l){y(e,t,l),m(t,a)},p(e,l){l&1&&r!==(r=e[3].title+"")&&O(a,r)},d(e){e&&u(t)}}}function le(n){let t,r=n[3].title+"",a,e;return{c(){t=v("a"),a=D(r),this.h()},l(l){t=b(l,"A",{role:!0,class:!0,href:!0});var o=k(t);a=V(o,r),o.forEach(u),this.h()},h(){h(t,"role","navigation"),h(t,"class","block text-gray-500 pr-2 hover:text-black dark:hover:text-gray-300 py-1"),h(t,"href",e=P+"/"+n[3].local.replace(/\bindex$/,""))},m(l,o){y(l,t,o),m(t,a)},p(l,o){o&1&&r!==(r=l[3].title+"")&&O(a,r),o&1&&e!==(e=P+"/"+l[3].local.replace(/\bindex$/,""))&&h(t,"href",e)},d(l){l&&u(t)}}}function U(n){let t;function r(l,o){return l[3].local?le:te}let a=r(n),e=a(n);return{c(){e.c(),t=S()},l(l){e.l(l),t=S()},m(l,o){e.m(l,o),y(l,t,o)},p(l,o){a===(a=r(l))&&e?e.p(l,o):(e.d(1),e=a(l),e&&(e.c(),e.m(t.parentNode,t)))},d(l){e.d(l),l&&u(t)}}}function ae(n){let t,r,a,e;const l=[ee,Z],o=[];function w(c,d){return 0}return t=w(),r=o[t]=l[t](n),{c(){r.c(),a=S()},l(c){r.l(c),a=S()},m(c,d){o[t].m(c,d),y(c,a,d),e=!0},p(c,[d]){r.p(c,d)},i(c){e||(C(r),e=!0)},o(c){F(r),e=!1},d(c){o[t].d(c),c&&u(a)}}}async function ne(n){return{}}function re(n,t,r){let{$$slots:a={},$$scope:e}=t,{toc:l}=t;return 
n.$$set=o=>{"toc"in o&&r(0,l=o.toc),"$$scope"in o&&r(1,e=o.$$scope)},[l,e,a]}class ie extends Q{constructor(t){super();R(this,t,re,ae,W,{toc:0})}}export{ie as default,ne as load};
478
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/accelerate.mdx-hf-doc-builder.js
import{S as Wo,i as Qo,s as Jo,e as t,k as m,w as u,t as c,M as Ro,c as s,d as a,m as d,a as l,x as f,h as p,b as n,G as o,g as i,y as h,L as Ko,q as v,o as _,B as $,v as Vo}from"../chunks/vendor-hf-doc-builder.js";import{I as ie}from"../chunks/IconCopyLink-hf-doc-builder.js";import{C as M}from"../chunks/CodeBlock-hf-doc-builder.js";function Xo(lo){let w,Oe,k,S,he,U,ga,ve,ba,De,q,wa,F,ka,Aa,He,A,C,_e,B,Ea,$e,ja,Ge,ne,Pa,Le,I,Me,g,ya,W,ge,za,Sa,be,qa,Ca,Ue,Q,Fe,E,N,we,J,Na,ke,Ta,Be,T,xa,R,Ae,Oa,Da,Ie,K,We,j,x,Ee,V,Ha,je,Ga,Qe,b,La,Pe,Ma,Ua,X,ye,Fa,Ba,Je,Y,Re,ce,Ia,Ke,Z,Ve,P,O,ze,ee,Wa,Se,Qa,Xe,pe,Ja,Ye,y,D,qe,ae,Ra,Ce,Ka,Ze,me,Va,ea,oe,aa,de,Xa,oa,re,ra,z,H,Ne,te,Ya,Te,Za,ta,G,eo,xe,ao,oo,sa,se,la,L,ro,le,to,so,ia;return U=new ie({}),B=new ie({}),I=new M({props:{code:"pip install accelerate",highlighted:"pip install accelerate"}}),Q=new M({props:{code:`from accelerate import Accelerator accelerator = Accelerator()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> accelerate <span class="hljs-keyword">import</span> Accelerator <span class="hljs-meta">&gt;&gt;&gt; </span>accelerator = Accelerator()`}}),J=new ie({}),K=new M({props:{code:`train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( train_dataloader, eval_dataloader, model, optimizer )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( <span class="hljs-meta">... </span> train_dataloader, eval_dataloader, model, optimizer <span class="hljs-meta">... 
</span>)`}}),V=new ie({}),Y=new M({props:{code:`for epoch in range(num_epochs): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> epoch <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_epochs): <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_dataloader: <span class="hljs-meta">... </span> outputs = model(**batch) <span class="hljs-meta">... </span> loss = outputs.loss <span class="hljs-meta">... </span> accelerator.backward(loss) <span class="hljs-meta">... </span> optimizer.step() <span class="hljs-meta">... </span> lr_scheduler.step() <span class="hljs-meta">... </span> optimizer.zero_grad() <span class="hljs-meta">... </span> progress_bar.update(<span class="hljs-number">1</span>)`}}),Z=new M({props:{code:`+ from accelerate import Accelerator from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler + accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - model.to(device) + train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( + train_dataloader, eval_dataloader, model, optimizer + ) num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: - batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss - 
loss.backward() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1)`,highlighted:`<span class="hljs-addition">+ from accelerate import Accelerator</span> from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler <span class="hljs-addition">+ accelerator = Accelerator()</span> model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) <span class="hljs-deletion">- device = torch.device(&quot;cuda&quot;) if torch.cuda.is_available() else torch.device(&quot;cpu&quot;)</span> <span class="hljs-deletion">- model.to(device)</span> <span class="hljs-addition">+ train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(</span> <span class="hljs-addition">+ train_dataloader, eval_dataloader, model, optimizer</span> <span class="hljs-addition">+ )</span> num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( &quot;linear&quot;, optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: <span class="hljs-deletion">- batch = {k: v.to(device) for k, v in batch.items()}</span> outputs = model(**batch) loss = outputs.loss <span class="hljs-deletion">- loss.backward()</span> <span class="hljs-addition">+ accelerator.backward(loss)</span> optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1)`}}),ee=new ie({}),ae=new ie({}),oe=new M({props:{code:"accelerate config",highlighted:"accelerate config"}}),re=new M({props:{code:"accelerate launch train.py",highlighted:"accelerate launch train.py"}}),te=new ie({}),se=new M({props:{code:`from accelerate import notebook_launcher notebook_launcher(training_function)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span 
class="hljs-keyword">from</span> accelerate <span class="hljs-keyword">import</span> notebook_launcher <span class="hljs-meta">&gt;&gt;&gt; </span>notebook_launcher(training_function)`}}),{c(){w=t("meta"),Oe=m(),k=t("h1"),S=t("a"),he=t("span"),u(U.$$.fragment),ga=m(),ve=t("span"),ba=c("Treinamento distribu\xEDdo com o \u{1F917} Accelerate"),De=m(),q=t("p"),wa=c(`O paralelismo surgiu como uma estrat\xE9gia para treinar modelos grandes em hardware limitado e aumentar a velocidade de treinamento em v\xE1rias \xF3rdens de magnitude. Na Hugging Face criamos a biblioteca `),F=t("a"),ka=c("\u{1F917} Accelerate"),Aa=c(` para ajudar os usu\xE1rios a treinar modelos \u{1F917} Transformers com qualquer configura\xE7\xE3o distribu\xEDda, seja em uma m\xE1quina com m\xFAltiplos GPUs ou em m\xFAltiplos GPUs distribuidos entre muitas m\xE1quinas. Neste tutorial, voc\xEA ir\xE1 aprender como personalizar seu la\xE7o de treinamento de PyTorch para poder treinar em ambientes distribu\xEDdos.`),He=m(),A=t("h2"),C=t("a"),_e=t("span"),u(B.$$.fragment),Ea=m(),$e=t("span"),ja=c("Configura\xE7\xE3o"),Ge=m(),ne=t("p"),Pa=c("De in\xEDcio, instale o \u{1F917} Accelerate:"),Le=m(),u(I.$$.fragment),Me=m(),g=t("p"),ya=c("Logo, devemos importar e criar um objeto "),W=t("a"),ge=t("code"),za=c("Accelerator"),Sa=c(`. O `),be=t("code"),qa=c("Accelerator"),Ca=c(` detectar\xE1 autom\xE1ticamente a configura\xE7\xE3o distribu\xEDda dispon\xEDvel e inicializar\xE1 todos os componentes necess\xE1rios para o treinamento. N\xE3o h\xE1 necessidade portanto de especificar o dispositivo onde deve colocar seu modelo.`),Ue=m(),u(Q.$$.fragment),Fe=m(),E=t("h2"),N=t("a"),we=t("span"),u(J.$$.fragment),Na=m(),ke=t("span"),Ta=c("Preparando a acelera\xE7\xE3o"),Be=m(),T=t("p"),xa=c("Passe todos os objetos relevantes ao treinamento para o m\xE9todo "),R=t("a"),Ae=t("code"),Oa=c("prepare"),Da=c(`. 
Isto inclui os DataLoaders de treino e evalua\xE7\xE3o, um modelo e um otimizador:`),Ie=m(),u(K.$$.fragment),We=m(),j=t("h2"),x=t("a"),Ee=t("span"),u(V.$$.fragment),Ha=m(),je=t("span"),Ga=c("Backward"),Qe=m(),b=t("p"),La=c("Por \xFAltimo, substitua o "),Pe=t("code"),Ma=c("loss.backward()"),Ua=c(" padr\xE3o em seu la\xE7o de treinamento com o m\xE9todo "),X=t("a"),ye=t("code"),Fa=c("backward"),Ba=c(" do \u{1F917} Accelerate:"),Je=m(),u(Y.$$.fragment),Re=m(),ce=t("p"),Ia=c(`Como se poder ver no seguinte c\xF3digo, s\xF3 precisar\xE1 adicionar quatro linhas de c\xF3digo ao seu la\xE7o de treinamento para habilitar o treinamento distribu\xEDdo!`),Ke=m(),u(Z.$$.fragment),Ve=m(),P=t("h2"),O=t("a"),ze=t("span"),u(ee.$$.fragment),Wa=m(),Se=t("span"),Qa=c("Treinamento"),Xe=m(),pe=t("p"),Ja=c("Quando tiver adicionado as linhas de c\xF3digo relevantes, inicie o treinamento por um script ou notebook como o Colab."),Ye=m(),y=t("h3"),D=t("a"),qe=t("span"),u(ae.$$.fragment),Ra=m(),Ce=t("span"),Ka=c("Treinamento em um Script"),Ze=m(),me=t("p"),Va=c("Se estiver rodando seu treinamento em um Script, execute o seguinte comando para criar e guardar um arquivo de configura\xE7\xE3o:"),ea=m(),u(oe.$$.fragment),aa=m(),de=t("p"),Xa=c("Comece o treinamento com:"),oa=m(),u(re.$$.fragment),ra=m(),z=t("h3"),H=t("a"),Ne=t("span"),u(te.$$.fragment),Ya=m(),Te=t("span"),Za=c("Treinamento em um Notebook"),ta=m(),G=t("p"),eo=c(`O \u{1F917} Accelerate pode rodar em um notebook, por exemplo, se estiver planejando usar as TPUs do Google Colab. 
Encapsule o c\xF3digo respons\xE1vel pelo treinamento de uma fun\xE7\xE3o e passe-o ao `),xe=t("code"),ao=c("notebook_launcher"),oo=c(":"),sa=m(),u(se.$$.fragment),la=m(),L=t("p"),ro=c("Para obter mais informa\xE7\xF5es sobre o \u{1F917} Accelerate e suas numerosas fun\xE7\xF5es, consulte a "),le=t("a"),to=c("documentaci\xF3n"),so=c("."),this.h()},l(e){const r=Ro('[data-svelte="svelte-1phssyn"]',document.head);w=s(r,"META",{name:!0,content:!0}),r.forEach(a),Oe=d(e),k=s(e,"H1",{class:!0});var na=l(k);S=s(na,"A",{id:!0,class:!0,href:!0});var io=l(S);he=s(io,"SPAN",{});var no=l(he);f(U.$$.fragment,no),no.forEach(a),io.forEach(a),ga=d(na),ve=s(na,"SPAN",{});var co=l(ve);ba=p(co,"Treinamento distribu\xEDdo com o \u{1F917} Accelerate"),co.forEach(a),na.forEach(a),De=d(e),q=s(e,"P",{});var ca=l(q);wa=p(ca,`O paralelismo surgiu como uma estrat\xE9gia para treinar modelos grandes em hardware limitado e aumentar a velocidade de treinamento em v\xE1rias \xF3rdens de magnitude. Na Hugging Face criamos a biblioteca `),F=s(ca,"A",{href:!0,rel:!0});var po=l(F);ka=p(po,"\u{1F917} Accelerate"),po.forEach(a),Aa=p(ca,` para ajudar os usu\xE1rios a treinar modelos \u{1F917} Transformers com qualquer configura\xE7\xE3o distribu\xEDda, seja em uma m\xE1quina com m\xFAltiplos GPUs ou em m\xFAltiplos GPUs distribuidos entre muitas m\xE1quinas. 
Neste tutorial, voc\xEA ir\xE1 aprender como personalizar seu la\xE7o de treinamento de PyTorch para poder treinar em ambientes distribu\xEDdos.`),ca.forEach(a),He=d(e),A=s(e,"H2",{class:!0});var pa=l(A);C=s(pa,"A",{id:!0,class:!0,href:!0});var mo=l(C);_e=s(mo,"SPAN",{});var uo=l(_e);f(B.$$.fragment,uo),uo.forEach(a),mo.forEach(a),Ea=d(pa),$e=s(pa,"SPAN",{});var fo=l($e);ja=p(fo,"Configura\xE7\xE3o"),fo.forEach(a),pa.forEach(a),Ge=d(e),ne=s(e,"P",{});var ho=l(ne);Pa=p(ho,"De in\xEDcio, instale o \u{1F917} Accelerate:"),ho.forEach(a),Le=d(e),f(I.$$.fragment,e),Me=d(e),g=s(e,"P",{});var ue=l(g);ya=p(ue,"Logo, devemos importar e criar um objeto "),W=s(ue,"A",{href:!0,rel:!0});var vo=l(W);ge=s(vo,"CODE",{});var _o=l(ge);za=p(_o,"Accelerator"),_o.forEach(a),vo.forEach(a),Sa=p(ue,`. O `),be=s(ue,"CODE",{});var $o=l(be);qa=p($o,"Accelerator"),$o.forEach(a),Ca=p(ue,` detectar\xE1 autom\xE1ticamente a configura\xE7\xE3o distribu\xEDda dispon\xEDvel e inicializar\xE1 todos os componentes necess\xE1rios para o treinamento. N\xE3o h\xE1 necessidade portanto de especificar o dispositivo onde deve colocar seu modelo.`),ue.forEach(a),Ue=d(e),f(Q.$$.fragment,e),Fe=d(e),E=s(e,"H2",{class:!0});var ma=l(E);N=s(ma,"A",{id:!0,class:!0,href:!0});var go=l(N);we=s(go,"SPAN",{});var bo=l(we);f(J.$$.fragment,bo),bo.forEach(a),go.forEach(a),Na=d(ma),ke=s(ma,"SPAN",{});var wo=l(ke);Ta=p(wo,"Preparando a acelera\xE7\xE3o"),wo.forEach(a),ma.forEach(a),Be=d(e),T=s(e,"P",{});var da=l(T);xa=p(da,"Passe todos os objetos relevantes ao treinamento para o m\xE9todo "),R=s(da,"A",{href:!0,rel:!0});var ko=l(R);Ae=s(ko,"CODE",{});var Ao=l(Ae);Oa=p(Ao,"prepare"),Ao.forEach(a),ko.forEach(a),Da=p(da,`. 
Isto inclui os DataLoaders de treino e evalua\xE7\xE3o, um modelo e um otimizador:`),da.forEach(a),Ie=d(e),f(K.$$.fragment,e),We=d(e),j=s(e,"H2",{class:!0});var ua=l(j);x=s(ua,"A",{id:!0,class:!0,href:!0});var Eo=l(x);Ee=s(Eo,"SPAN",{});var jo=l(Ee);f(V.$$.fragment,jo),jo.forEach(a),Eo.forEach(a),Ha=d(ua),je=s(ua,"SPAN",{});var Po=l(je);Ga=p(Po,"Backward"),Po.forEach(a),ua.forEach(a),Qe=d(e),b=s(e,"P",{});var fe=l(b);La=p(fe,"Por \xFAltimo, substitua o "),Pe=s(fe,"CODE",{});var yo=l(Pe);Ma=p(yo,"loss.backward()"),yo.forEach(a),Ua=p(fe," padr\xE3o em seu la\xE7o de treinamento com o m\xE9todo "),X=s(fe,"A",{href:!0,rel:!0});var zo=l(X);ye=s(zo,"CODE",{});var So=l(ye);Fa=p(So,"backward"),So.forEach(a),zo.forEach(a),Ba=p(fe," do \u{1F917} Accelerate:"),fe.forEach(a),Je=d(e),f(Y.$$.fragment,e),Re=d(e),ce=s(e,"P",{});var qo=l(ce);Ia=p(qo,`Como se poder ver no seguinte c\xF3digo, s\xF3 precisar\xE1 adicionar quatro linhas de c\xF3digo ao seu la\xE7o de treinamento para habilitar o treinamento distribu\xEDdo!`),qo.forEach(a),Ke=d(e),f(Z.$$.fragment,e),Ve=d(e),P=s(e,"H2",{class:!0});var fa=l(P);O=s(fa,"A",{id:!0,class:!0,href:!0});var Co=l(O);ze=s(Co,"SPAN",{});var No=l(ze);f(ee.$$.fragment,No),No.forEach(a),Co.forEach(a),Wa=d(fa),Se=s(fa,"SPAN",{});var To=l(Se);Qa=p(To,"Treinamento"),To.forEach(a),fa.forEach(a),Xe=d(e),pe=s(e,"P",{});var xo=l(pe);Ja=p(xo,"Quando tiver adicionado as linhas de c\xF3digo relevantes, inicie o treinamento por um script ou notebook como o Colab."),xo.forEach(a),Ye=d(e),y=s(e,"H3",{class:!0});var ha=l(y);D=s(ha,"A",{id:!0,class:!0,href:!0});var Oo=l(D);qe=s(Oo,"SPAN",{});var Do=l(qe);f(ae.$$.fragment,Do),Do.forEach(a),Oo.forEach(a),Ra=d(ha),Ce=s(ha,"SPAN",{});var Ho=l(Ce);Ka=p(Ho,"Treinamento em um Script"),Ho.forEach(a),ha.forEach(a),Ze=d(e),me=s(e,"P",{});var Go=l(me);Va=p(Go,"Se estiver rodando seu treinamento em um Script, execute o seguinte comando para criar e guardar um arquivo de 
configura\xE7\xE3o:"),Go.forEach(a),ea=d(e),f(oe.$$.fragment,e),aa=d(e),de=s(e,"P",{});var Lo=l(de);Xa=p(Lo,"Comece o treinamento com:"),Lo.forEach(a),oa=d(e),f(re.$$.fragment,e),ra=d(e),z=s(e,"H3",{class:!0});var va=l(z);H=s(va,"A",{id:!0,class:!0,href:!0});var Mo=l(H);Ne=s(Mo,"SPAN",{});var Uo=l(Ne);f(te.$$.fragment,Uo),Uo.forEach(a),Mo.forEach(a),Ya=d(va),Te=s(va,"SPAN",{});var Fo=l(Te);Za=p(Fo,"Treinamento em um Notebook"),Fo.forEach(a),va.forEach(a),ta=d(e),G=s(e,"P",{});var _a=l(G);eo=p(_a,`O \u{1F917} Accelerate pode rodar em um notebook, por exemplo, se estiver planejando usar as TPUs do Google Colab. Encapsule o c\xF3digo respons\xE1vel pelo treinamento de uma fun\xE7\xE3o e passe-o ao `),xe=s(_a,"CODE",{});var Bo=l(xe);ao=p(Bo,"notebook_launcher"),Bo.forEach(a),oo=p(_a,":"),_a.forEach(a),sa=d(e),f(se.$$.fragment,e),la=d(e),L=s(e,"P",{});var $a=l(L);ro=p($a,"Para obter mais informa\xE7\xF5es sobre o \u{1F917} Accelerate e suas numerosas fun\xE7\xF5es, consulte a "),le=s($a,"A",{href:!0,rel:!0});var Io=l(le);to=p(Io,"documentaci\xF3n"),Io.forEach(a),so=p($a,"."),$a.forEach(a),this.h()},h(){n(w,"name","hf:doc:metadata"),n(w,"content",JSON.stringify(Yo)),n(S,"id","treinamento-distribudo-com-o-accelerate"),n(S,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(S,"href","#treinamento-distribudo-com-o-accelerate"),n(k,"class","relative group"),n(F,"href","https://huggingface.co/docs/accelerate"),n(F,"rel","nofollow"),n(C,"id","configurao"),n(C,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(C,"href","#configurao"),n(A,"class","relative 
group"),n(W,"href","https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator"),n(W,"rel","nofollow"),n(N,"id","preparando-a-acelerao"),n(N,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(N,"href","#preparando-a-acelerao"),n(E,"class","relative group"),n(R,"href","https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare"),n(R,"rel","nofollow"),n(x,"id","backward"),n(x,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(x,"href","#backward"),n(j,"class","relative group"),n(X,"href","https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.backward"),n(X,"rel","nofollow"),n(O,"id","treinamento"),n(O,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(O,"href","#treinamento"),n(P,"class","relative group"),n(D,"id","treinamento-em-um-script"),n(D,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(D,"href","#treinamento-em-um-script"),n(y,"class","relative group"),n(H,"id","treinamento-em-um-notebook"),n(H,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),n(H,"href","#treinamento-em-um-notebook"),n(z,"class","relative 
group"),n(le,"href","https://huggingface.co/docs/accelerate/index"),n(le,"rel","nofollow")},m(e,r){o(document.head,w),i(e,Oe,r),i(e,k,r),o(k,S),o(S,he),h(U,he,null),o(k,ga),o(k,ve),o(ve,ba),i(e,De,r),i(e,q,r),o(q,wa),o(q,F),o(F,ka),o(q,Aa),i(e,He,r),i(e,A,r),o(A,C),o(C,_e),h(B,_e,null),o(A,Ea),o(A,$e),o($e,ja),i(e,Ge,r),i(e,ne,r),o(ne,Pa),i(e,Le,r),h(I,e,r),i(e,Me,r),i(e,g,r),o(g,ya),o(g,W),o(W,ge),o(ge,za),o(g,Sa),o(g,be),o(be,qa),o(g,Ca),i(e,Ue,r),h(Q,e,r),i(e,Fe,r),i(e,E,r),o(E,N),o(N,we),h(J,we,null),o(E,Na),o(E,ke),o(ke,Ta),i(e,Be,r),i(e,T,r),o(T,xa),o(T,R),o(R,Ae),o(Ae,Oa),o(T,Da),i(e,Ie,r),h(K,e,r),i(e,We,r),i(e,j,r),o(j,x),o(x,Ee),h(V,Ee,null),o(j,Ha),o(j,je),o(je,Ga),i(e,Qe,r),i(e,b,r),o(b,La),o(b,Pe),o(Pe,Ma),o(b,Ua),o(b,X),o(X,ye),o(ye,Fa),o(b,Ba),i(e,Je,r),h(Y,e,r),i(e,Re,r),i(e,ce,r),o(ce,Ia),i(e,Ke,r),h(Z,e,r),i(e,Ve,r),i(e,P,r),o(P,O),o(O,ze),h(ee,ze,null),o(P,Wa),o(P,Se),o(Se,Qa),i(e,Xe,r),i(e,pe,r),o(pe,Ja),i(e,Ye,r),i(e,y,r),o(y,D),o(D,qe),h(ae,qe,null),o(y,Ra),o(y,Ce),o(Ce,Ka),i(e,Ze,r),i(e,me,r),o(me,Va),i(e,ea,r),h(oe,e,r),i(e,aa,r),i(e,de,r),o(de,Xa),i(e,oa,r),h(re,e,r),i(e,ra,r),i(e,z,r),o(z,H),o(H,Ne),h(te,Ne,null),o(z,Ya),o(z,Te),o(Te,Za),i(e,ta,r),i(e,G,r),o(G,eo),o(G,xe),o(xe,ao),o(G,oo),i(e,sa,r),h(se,e,r),i(e,la,r),i(e,L,r),o(L,ro),o(L,le),o(le,to),o(L,so),ia=!0},p:Ko,i(e){ia||(v(U.$$.fragment,e),v(B.$$.fragment,e),v(I.$$.fragment,e),v(Q.$$.fragment,e),v(J.$$.fragment,e),v(K.$$.fragment,e),v(V.$$.fragment,e),v(Y.$$.fragment,e),v(Z.$$.fragment,e),v(ee.$$.fragment,e),v(ae.$$.fragment,e),v(oe.$$.fragment,e),v(re.$$.fragment,e),v(te.$$.fragment,e),v(se.$$.fragment,e),ia=!0)},o(e){_(U.$$.fragment,e),_(B.$$.fragment,e),_(I.$$.fragment,e),_(Q.$$.fragment,e),_(J.$$.fragment,e),_(K.$$.fragment,e),_(V.$$.fragment,e),_(Y.$$.fragment,e),_(Z.$$.fragment,e),_(ee.$$.fragment,e),_(ae.$$.fragment,e),_(oe.$$.fragment,e),_(re.$$.fragment,e),_(te.$$.fragment,e),_(se.$$.fragment,e),ia=!1},d(e){a(w),e&&a(Oe),e&&a(k),$(U),e&&a(De),e&&a(q),e&&a(He),e&&a(A),$(B)
,e&&a(Ge),e&&a(ne),e&&a(Le),$(I,e),e&&a(Me),e&&a(g),e&&a(Ue),$(Q,e),e&&a(Fe),e&&a(E),$(J),e&&a(Be),e&&a(T),e&&a(Ie),$(K,e),e&&a(We),e&&a(j),$(V),e&&a(Qe),e&&a(b),e&&a(Je),$(Y,e),e&&a(Re),e&&a(ce),e&&a(Ke),$(Z,e),e&&a(Ve),e&&a(P),$(ee),e&&a(Xe),e&&a(pe),e&&a(Ye),e&&a(y),$(ae),e&&a(Ze),e&&a(me),e&&a(ea),$(oe,e),e&&a(aa),e&&a(de),e&&a(oa),$(re,e),e&&a(ra),e&&a(z),$(te),e&&a(ta),e&&a(G),e&&a(sa),$(se,e),e&&a(la),e&&a(L)}}}const Yo={local:"treinamento-distribudo-com-o-accelerate",sections:[{local:"configurao",title:"Configura\xE7\xE3o"},{local:"preparando-a-acelerao",title:"Preparando a acelera\xE7\xE3o"},{local:"backward",title:"Backward"},{local:"treinamento",sections:[{local:"treinamento-em-um-script",title:"Treinamento em um Script"},{local:"treinamento-em-um-notebook",title:"Treinamento em um Notebook"}],title:"Treinamento"}],title:"Treinamento distribu\xEDdo com o \u{1F917} Accelerate"};function Zo(lo){return Vo(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class rr extends Wo{constructor(w){super();Qo(this,w,Zo,Xo,Jo,{})}}export{rr as default,Yo as metadata};
479
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/installation.mdx-hf-doc-builder.js
import{S as Nn,i as Dn,s as Hn,e as r,k as p,w as h,t as i,M as Ln,c as t,d as o,m,a as s,x as v,h as n,b as d,N as zn,G as a,g as f,y as _,q as $,o as g,B as E,v as Un}from"../chunks/vendor-hf-doc-builder.js";import{T as xt}from"../chunks/Tip-hf-doc-builder.js";import{I as _e}from"../chunks/IconCopyLink-hf-doc-builder.js";import{C as q}from"../chunks/CodeBlock-hf-doc-builder.js";function Bn(R){let c,y,u,P,T;return{c(){c=r("p"),y=i("\xC9 necess\xE1rio manter o diret\xF3rio "),u=r("code"),P=i("transformers"),T=i(" se desejas continuar usando a biblioteca.")},l(b){c=t(b,"P",{});var w=s(c);y=n(w,"\xC9 necess\xE1rio manter o diret\xF3rio "),u=t(w,"CODE",{});var A=s(u);P=n(A,"transformers"),A.forEach(o),T=n(w," se desejas continuar usando a biblioteca."),w.forEach(o)},m(b,w){f(b,c,w),a(c,y),a(c,u),a(u,P),a(c,T)},d(b){b&&o(c)}}}function Vn(R){let c,y,u,P,T,b,w,A,C,O,S;return{c(){c=r("p"),y=i("O \u{1F917} Transformers usar\xE1 as vari\xE1veis de ambiente do shell "),u=r("code"),P=i("PYTORCH_TRANSFORMERS_CACHE"),T=i(" ou "),b=r("code"),w=i("PYTORCH_PRETRAINED_BERT_CACHE"),A=i(` se estiver vindo de uma vers\xE3o anterior da biblioteca que tenha configurado essas vari\xE1veis de ambiente, a menos que voc\xEA especifique a vari\xE1vel de ambiente do shell `),C=r("code"),O=i("TRANSFORMERS_CACHE"),S=i(".")},l(j){c=t(j,"P",{});var k=s(c);y=n(k,"O \u{1F917} Transformers usar\xE1 as vari\xE1veis de ambiente do shell "),u=t(k,"CODE",{});var z=s(u);P=n(z,"PYTORCH_TRANSFORMERS_CACHE"),z.forEach(o),T=n(k," ou "),b=t(k,"CODE",{});var Po=s(b);w=n(Po,"PYTORCH_PRETRAINED_BERT_CACHE"),Po.forEach(o),A=n(k,` se estiver vindo de uma vers\xE3o anterior da biblioteca que tenha configurado essas vari\xE1veis de ambiente, a menos que voc\xEA especifique a vari\xE1vel de ambiente do shell `),C=t(k,"CODE",{});var $e=s(C);O=n($e,"TRANSFORMERS_CACHE"),$e.forEach(o),S=n(k,"."),k.forEach(o)},m(j,k){f(j,c,k),a(c,y),a(c,u),a(u,P),a(c,T),a(c,b),a(b,w),a(c,A),a(c,C),a(C,O),a(c,S)},d(j){j&&o(c)}}}function 
Gn(R){let c,y,u,P,T,b,w,A;return{c(){c=r("p"),y=i("Voc\xEA pode adicionar o "),u=r("a"),P=i("\u{1F917} Datasets"),T=i(` ao pipeline de treinamento offline declarando a vari\xE1vel de ambiente `),b=r("code"),w=i("HF_DATASETS_OFFLINE=1"),A=i("."),this.h()},l(C){c=t(C,"P",{});var O=s(c);y=n(O,"Voc\xEA pode adicionar o "),u=t(O,"A",{href:!0,rel:!0});var S=s(u);P=n(S,"\u{1F917} Datasets"),S.forEach(o),T=n(O,` ao pipeline de treinamento offline declarando a vari\xE1vel de ambiente `),b=t(O,"CODE",{});var j=s(b);w=n(j,"HF_DATASETS_OFFLINE=1"),j.forEach(o),A=n(O,"."),O.forEach(o),this.h()},h(){d(u,"href","https://huggingface.co/docs/datasets/"),d(u,"rel","nofollow")},m(C,O){f(C,c,O),a(c,y),a(c,u),a(u,P),a(c,T),a(c,b),a(b,w),a(c,A)},d(C){C&&o(c)}}}function Yn(R){let c,y,u,P,T;return{c(){c=r("p"),y=i("Para obter mais detalhes sobre como baixar arquivos armazenados no Hub, consulte a se\xE7\xE3o "),u=r("a"),P=i("How to download files from the Hub"),T=i("."),this.h()},l(b){c=t(b,"P",{});var w=s(c);y=n(w,"Para obter mais detalhes sobre como baixar arquivos armazenados no Hub, consulte a se\xE7\xE3o "),u=t(w,"A",{href:!0,rel:!0});var A=s(u);P=n(A,"How to download files from the Hub"),A.forEach(o),T=n(w,"."),w.forEach(o),this.h()},h(){d(u,"href","https://huggingface.co/docs/hub/how-to-downstream"),d(u,"rel","nofollow")},m(b,w){f(b,c,w),a(c,y),a(c,u),a(u,P),a(c,T)},d(b){b&&o(c)}}}function Qn(R){let 
c,y,u,P,T,b,w,A,C,O,S,j,k,z,Po,$e,N,ea,ge,Ct,kt,oa,Ee,St,jt,aa,be,It,Qa,U,ee,ra,we,Ft,ta,Mt,Wa,D,Rt,Pe,Nt,Dt,Te,Ht,Lt,Xa,To,zt,Ja,qe,Ka,qo,Ut,Za,ye,er,yo,Bt,or,Ae,ar,Ao,Vt,rr,Oo,Gt,tr,Oe,sr,xo,Yt,lr,xe,ir,Co,Qt,nr,Ce,fr,ko,Wt,pr,ke,mr,So,Xt,dr,Se,cr,B,oe,sa,je,Jt,la,Kt,ur,jo,Zt,hr,Ie,vr,x,es,ia,os,as,na,rs,ts,fa,ss,ls,pa,is,ns,Fe,fs,ps,_r,Io,ms,$r,Me,gr,V,ae,ma,Re,ds,da,cs,Er,Fo,us,br,re,Ne,hs,ca,vs,_s,$s,ua,gs,wr,Mo,Es,Pr,De,Tr,H,bs,ha,ws,Ps,va,Ts,qs,qr,te,yr,Ro,ys,Ar,He,Or,se,As,_a,Os,xs,xr,G,le,$a,Le,Cs,ga,ks,Cr,ie,Ss,Ea,js,Is,kr,ze,Sr,Y,ne,ba,Ue,Fs,wa,Ms,jr,I,Rs,Pa,Ns,Ds,Ta,Hs,Ls,qa,zs,Us,Ir,L,Be,Bs,ya,Vs,Gs,Ys,Q,Qs,Aa,Ws,Xs,Oa,Js,Ks,Zs,W,el,xa,ol,al,Ca,rl,tl,Fr,fe,Mr,X,pe,ka,Ve,sl,Sa,ll,Rr,me,il,ja,nl,fl,Nr,de,Dr,No,pl,Hr,Ge,Lr,Do,ml,zr,Ye,Ur,Ho,dl,Br,J,ce,Ia,Qe,cl,Fa,ul,Vr,Lo,hl,Gr,zo,We,Xe,vl,Je,_l,$l,gl,Ma,Uo,ri,Yr,Bo,Ke,K,El,Ra,bl,wl,Na,Pl,Tl,ql,Da,Ze,eo,yl,Ha,Al,Ol,xl,oo,Qr,Z,ao,ro,Cl,La,kl,Sl,jl,to,Il,so,lo,Fl,za,Ml,Rl,Nl,io,Wr,Vo,no,fo,Dl,po,Hl,Ll,zl,mo,co,uo,Ul,ho,Bl,Vl,Gl,vo,Yl,_o,M,Ql,$o,Ua,Wl,Xl,Ba,Jl,Kl,go,Zl,ei,oi,Eo,Xr,Go,ai,Jr,bo,Kr,ue,Zr;return b=new _e({}),we=new _e({}),qe=new q({props:{code:"python -m venv .env",highlighted:'python -m venv .<span class="hljs-built_in">env</span>'}}),ye=new q({props:{code:"source .env/bin/activate",highlighted:'<span class="hljs-built_in">source</span> .<span class="hljs-built_in">env</span>/bin/activate'}}),Ae=new q({props:{code:"pip install transformers",highlighted:"pip install transformers"}}),Oe=new q({props:{code:"pip install transformers[torch]",highlighted:"pip install transformers[torch]"}}),xe=new q({props:{code:"pip install transformers[tf-cpu]",highlighted:"pip install transformers[tf-cpu]"}}),Ce=new q({props:{code:"pip install transformers[flax]",highlighted:"pip install transformers[flax]"}}),ke=new q({props:{code:`python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))"`,highlighted:'python -c <span class="hljs-string">&quot;from transformers import 
pipeline; print(pipeline(&#x27;sentiment-analysis&#x27;)(&#x27;we love you&#x27;))&quot;</span>'}}),Se=new q({props:{code:"[{'label': 'POSITIVE', 'score': 0.9998704791069031}]",highlighted:'[{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: 0.9998704791069031}]'}}),je=new _e({}),Ie=new q({props:{code:"pip install git+https://github.com/huggingface/transformers",highlighted:"pip install git+https://github.com/huggingface/transformers"}}),Me=new q({props:{code:`python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))"`,highlighted:'python -c <span class="hljs-string">&quot;from transformers import pipeline; print(pipeline(&#x27;sentiment-analysis&#x27;)(&#x27;I love you&#x27;))&quot;</span>'}}),Re=new _e({}),De=new q({props:{code:`git clone https://github.com/huggingface/transformers.git cd transformers pip install -e .`,highlighted:`git <span class="hljs-built_in">clone</span> https://github.com/huggingface/transformers.git <span class="hljs-built_in">cd</span> transformers pip install -e .`}}),te=new xt({props:{warning:!0,$$slots:{default:[Bn]},$$scope:{ctx:R}}}),He=new q({props:{code:`cd ~/transformers/ git pull`,highlighted:`<span class="hljs-built_in">cd</span> ~/transformers/ git pull`}}),Le=new _e({}),ze=new q({props:{code:"conda install -c huggingface transformers",highlighted:"conda install -c huggingface transformers"}}),Ue=new _e({}),fe=new xt({props:{$$slots:{default:[Vn]},$$scope:{ctx:R}}}),Ve=new _e({}),de=new xt({props:{$$slots:{default:[Gn]},$$scope:{ctx:R}}}),Ge=new q({props:{code:"python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...",highlighted:"python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ..."}}),Ye=new 
q({props:{code:`HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \\ python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...`,highlighted:`HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \\ python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...`}}),Qe=new _e({}),oo=new q({props:{code:`from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>)`}}),to=new q({props:{code:`tokenizer.save_pretrained("./your/path/bigscience_t0") model.save_pretrained("./your/path/bigscience_t0")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)`}}),io=new q({props:{code:`tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") model = AutoModel.from_pretrained("./your/path/bigscience_t0")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained(<span 
class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)`}}),vo=new q({props:{code:"python -m pip install huggingface_hub",highlighted:"python -m pip install huggingface_hub"}}),Eo=new q({props:{code:`from huggingface_hub import hf_hub_download hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> hf_hub_download <span class="hljs-meta">&gt;&gt;&gt; </span>hf_hub_download(repo_id=<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>, filename=<span class="hljs-string">&quot;config.json&quot;</span>, cache_dir=<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)`}}),bo=new q({props:{code:`from transformers import AutoConfig config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0/config.json&quot;</span>)`}}),ue=new xt({props:{$$slots:{default:[Yn]},$$scope:{ctx:R}}}),{c(){c=r("meta"),y=p(),u=r("h1"),P=r("a"),T=r("span"),h(b.$$.fragment),w=p(),A=r("span"),C=i("Guia de Instala\xE7\xE3o"),O=p(),S=r("p"),j=i(`Neste guia poder\xE1 encontrar informa\xE7\xF5es para a instala\xE7\xE3o do \u{1F917} Transformers para qualquer biblioteca de Machine Learning com a qual esteja a trabalhar. Al\xE9m disso, poder\xE1 encontrar informa\xE7\xF5es sobre como gerar cach\xEAs e configurar o \u{1F917} Transformers para execu\xE7\xE3o em modo offline (opcional).`),k=p(),z=r("p"),Po=i(`\u{1F917} Transformers foi testado com Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, e Flax. 
Para instalar a biblioteca de deep learning com que deseja trabalhar, siga as instru\xE7\xF5es correspondentes listadas a seguir:`),$e=p(),N=r("ul"),ea=r("li"),ge=r("a"),Ct=i("PyTorch"),kt=p(),oa=r("li"),Ee=r("a"),St=i("TensorFlow 2.0"),jt=p(),aa=r("li"),be=r("a"),It=i("Flax"),Qa=p(),U=r("h2"),ee=r("a"),ra=r("span"),h(we.$$.fragment),Ft=p(),ta=r("span"),Mt=i("Instala\xE7\xE3o pelo Pip"),Wa=p(),D=r("p"),Rt=i("\xC9 sugerido instalar o \u{1F917} Transformers num "),Pe=r("a"),Nt=i("ambiente virtual"),Dt=i(`. Se precisar de mais informa\xE7\xF5es sobre ambientes virtuais em Python, consulte este `),Te=r("a"),Ht=i("guia"),Lt=i(`. Um ambiente virtual facilitar\xE1 a manipula\xE7\xE3o e organiza\xE7\xE3o de projetos e evita problemas de compatibilidade entre depend\xEAncias.`),Xa=p(),To=r("p"),zt=i("Comece criando um ambiente virtual no diret\xF3rio do seu projeto:"),Ja=p(),h(qe.$$.fragment),Ka=p(),qo=r("p"),Ut=i("E para ativar o ambiente virtual:"),Za=p(),h(ye.$$.fragment),er=p(),yo=r("p"),Bt=i("Agora \xC9 poss\xEDvel instalar o \u{1F917} Transformers com o comando a seguir:"),or=p(),h(Ae.$$.fragment),ar=p(),Ao=r("p"),Vt=i("Somente para a CPU, \xE9 poss\xEDvel instalar o \u{1F917} Transformers e a biblioteca de deep learning respectiva apenas numa linha."),rr=p(),Oo=r("p"),Gt=i("Por exemplo, para instalar o \u{1F917} Transformers e o PyTorch, digite:"),tr=p(),h(Oe.$$.fragment),sr=p(),xo=r("p"),Yt=i("\u{1F917} Transformers e TensorFlow 2.0:"),lr=p(),h(xe.$$.fragment),ir=p(),Co=r("p"),Qt=i("\u{1F917} Transformers e Flax:"),nr=p(),h(Ce.$$.fragment),fr=p(),ko=r("p"),Wt=i("Por \xFAltimo, verifique se o \u{1F917} Transformers foi instalado com sucesso usando o seguinte comando para baixar um modelo pr\xE9-treinado:"),pr=p(),h(ke.$$.fragment),mr=p(),So=r("p"),Xt=i("Em seguida, imprima um r\xF3tulo e sua pontua\xE7\xE3o:"),dr=p(),h(Se.$$.fragment),cr=p(),B=r("h2"),oe=r("a"),sa=r("span"),h(je.$$.fragment),Jt=p(),la=r("span"),Kt=i("Instala\xE7\xE3o usando a 
fonte"),ur=p(),jo=r("p"),Zt=i("Para instalar o \u{1F917} Transformers a partir da fonte use o seguinte comando:"),hr=p(),h(Ie.$$.fragment),vr=p(),x=r("p"),es=i("O comando acima instalar\xE1 a vers\xE3o "),ia=r("code"),os=i("master"),as=i(" mais atual em vez da \xFAltima vers\xE3o est\xE1vel. A vers\xE3o "),na=r("code"),rs=i("master"),ts=i(` \xE9 \xFAtil para utilizar os \xFAltimos updates contidos em \u{1F917} Transformers. Por exemplo, um erro recente pode ter sido corrigido somente ap\xF3s a \xFAltima vers\xE3o est\xE1vel, antes que houvesse um novo lan\xE7amento. No entanto, h\xE1 a possibilidade que a vers\xE3o `),fa=r("code"),ss=i("master"),ls=i(` n\xE3o esteja est\xE1vel. A equipa trata de mant\xE9r a vers\xE3o `),pa=r("code"),is=i("master"),ns=i(` operacional e a maioria dos erros s\xE3o resolvidos em poucas horas ou dias. Se encontrar quaisquer problemas, por favor abra um `),Fe=r("a"),fs=i("Issue"),ps=i(` para que o mesmo possa ser corrigido o mais r\xE1pido poss\xEDvel.`),_r=p(),Io=r("p"),ms=i("Verifique que o \u{1F917} Transformers est\xE1 instalado corretamente usando o seguinte comando:"),$r=p(),h(Me.$$.fragment),gr=p(),V=r("h2"),ae=r("a"),ma=r("span"),h(Re.$$.fragment),ds=p(),da=r("span"),cs=i("Instala\xE7\xE3o edit\xE1vel"),Er=p(),Fo=r("p"),us=i("Uma instala\xE7\xE3o edit\xE1vel ser\xE1 necess\xE1ria caso desejas um dos seguintes:"),br=p(),re=r("ul"),Ne=r("li"),hs=i("Usar a vers\xE3o "),ca=r("code"),vs=i("master"),_s=i(" do c\xF3digo fonte."),$s=p(),ua=r("li"),gs=i("Contribuir ao \u{1F917} Transformers e precisa testar mudan\xE7as ao c\xF3digo."),wr=p(),Mo=r("p"),Es=i("Para tal, clone o reposit\xF3rio e instale o \u{1F917} Transformers com os seguintes comandos:"),Pr=p(),h(De.$$.fragment),Tr=p(),H=r("p"),bs=i(`Estes comandos v\xE3o ligar o diret\xF3rio para o qual foi clonado o reposit\xF3rio ao caminho de bibliotecas do Python. O Python agora buscar\xE1 dentro dos arquivos que foram clonados al\xE9m dos caminhos normais da biblioteca. 
Por exemplo, se os pacotes do Python se encontram instalados no caminho `),ha=r("code"),ws=i("~/anaconda3/envs/main/lib/python3.7/site-packages/"),Ps=i(`, o Python tamb\xE9m buscar\xE1 m\xF3dulos no diret\xF3rio onde clonamos o reposit\xF3rio `),va=r("code"),Ts=i("~/transformers/"),qs=i("."),qr=p(),h(te.$$.fragment),yr=p(),Ro=r("p"),ys=i("Assim, \xC9 poss\xEDvel atualizar sua c\xF3pia local para com a \xFAltima vers\xE3o do \u{1F917} Transformers com o seguinte comando:"),Ar=p(),h(He.$$.fragment),Or=p(),se=r("p"),As=i("O ambiente de Python que foi criado para a instala\xE7\xE3o do \u{1F917} Transformers encontrar\xE1 a vers\xE3o "),_a=r("code"),Os=i("master"),xs=i(" em execu\xE7\xF5es seguintes."),xr=p(),G=r("h2"),le=r("a"),$a=r("span"),h(Le.$$.fragment),Cs=p(),ga=r("span"),ks=i("Instala\xE7\xE3o usando o Conda"),Cr=p(),ie=r("p"),Ss=i("\xC9 poss\xEDvel instalar o \u{1F917} Transformers a partir do canal conda "),Ea=r("code"),js=i("huggingface"),Is=i(" com o seguinte comando:"),kr=p(),h(ze.$$.fragment),Sr=p(),Y=r("h2"),ne=r("a"),ba=r("span"),h(Ue.$$.fragment),Fs=p(),wa=r("span"),Ms=i("Configura\xE7\xE3o do Cach\xEA"),jr=p(),I=r("p"),Rs=i("Os modelos pr\xE9-treinados s\xE3o baixados e armazenados no cach\xEA local, encontrado em "),Pa=r("code"),Ns=i("~/.cache/huggingface/transformers/"),Ds=i(`. Este \xE9 o diret\xF3rio padr\xE3o determinado pela vari\xE1vel `),Ta=r("code"),Hs=i("TRANSFORMERS_CACHE"),Ls=i(` dentro do shell. No Windows, este diret\xF3rio pr\xE9-definido \xE9 dado por `),qa=r("code"),zs=i("C:\\Users\\username\\.cache\\huggingface\\transformers"),Us=i(`. 
\xC9 poss\xEDvel mudar as vari\xE1veis dentro do shell em ordem de prioridade para especificar um diret\xF3rio de cach\xEA diferente:`),Ir=p(),L=r("ol"),Be=r("li"),Bs=i("Vari\xE1vel de ambiente do shell (por padr\xE3o): "),ya=r("code"),Vs=i("TRANSFORMERS_CACHE"),Gs=i("."),Ys=p(),Q=r("li"),Qs=i("Vari\xE1vel de ambiente do shell:"),Aa=r("code"),Ws=i("HF_HOME"),Xs=i(" + "),Oa=r("code"),Js=i("transformers/"),Ks=i("."),Zs=p(),W=r("li"),el=i("Vari\xE1vel de ambiente do shell: "),xa=r("code"),ol=i("XDG_CACHE_HOME"),al=i(" + "),Ca=r("code"),rl=i("/huggingface/transformers"),tl=i("."),Fr=p(),h(fe.$$.fragment),Mr=p(),X=r("h2"),pe=r("a"),ka=r("span"),h(Ve.$$.fragment),sl=p(),Sa=r("span"),ll=i("Modo Offline"),Rr=p(),me=r("p"),il=i(`O \u{1F917} Transformers tamb\xE9m pode ser executado num ambiente de firewall ou fora da rede (offline) usando arquivos locais. Para tal, configure a vari\xE1vel de ambiente de modo que `),ja=r("code"),nl=i("TRANSFORMERS_OFFLINE=1"),fl=i("."),Nr=p(),h(de.$$.fragment),Dr=p(),No=r("p"),pl=i("Segue um exemplo de execu\xE7\xE3o do programa numa rede padr\xE3o com firewall para inst\xE2ncias externas, usando o seguinte comando:"),Hr=p(),h(Ge.$$.fragment),Lr=p(),Do=r("p"),ml=i("Execute esse mesmo programa numa inst\xE2ncia offline com o seguinte comando:"),zr=p(),h(Ye.$$.fragment),Ur=p(),Ho=r("p"),dl=i("O script agora deve ser executado sem travar ou expirar, pois procurar\xE1 apenas por arquivos locais."),Br=p(),J=r("h3"),ce=r("a"),Ia=r("span"),h(Qe.$$.fragment),cl=p(),Fa=r("span"),ul=i("Obtendo modelos e tokenizers para uso offline"),Vr=p(),Lo=r("p"),hl=i("Outra op\xE7\xE3o para usar o \u{1F917} Transformers offline \xE9 baixar os arquivos antes e depois apontar para o caminho local onde est\xE3o localizados. 
Existem tr\xEAs maneiras de fazer isso:"),Gr=p(),zo=r("ul"),We=r("li"),Xe=r("p"),vl=i("Baixe um arquivo por meio da interface de usu\xE1rio do "),Je=r("a"),_l=i("Model Hub"),$l=i(" clicando no \xEDcone \u2193."),gl=p(),Ma=r("p"),Uo=r("img"),Yr=p(),Bo=r("ul"),Ke=r("li"),K=r("p"),El=i("Use o pipeline do "),Ra=r("code"),bl=i("PreTrainedModel.from_pretrained()"),wl=i(" e "),Na=r("code"),Pl=i("PreTrainedModel.save_pretrained()"),Tl=i(":"),ql=p(),Da=r("ol"),Ze=r("li"),eo=r("p"),yl=i("Baixa os arquivos previamente com "),Ha=r("code"),Al=i("PreTrainedModel.from_pretrained()"),Ol=i(":"),xl=p(),h(oo.$$.fragment),Qr=p(),Z=r("ol"),ao=r("li"),ro=r("p"),Cl=i("Salve os arquivos em um diret\xF3rio espec\xEDfico com "),La=r("code"),kl=i("PreTrainedModel.save_pretrained()"),Sl=i(":"),jl=p(),h(to.$$.fragment),Il=p(),so=r("li"),lo=r("p"),Fl=i("Quando estiver offline, acesse os arquivos com "),za=r("code"),Ml=i("PreTrainedModel.from_pretrained()"),Rl=i(" do diret\xF3rio especificado:"),Nl=p(),h(io.$$.fragment),Wr=p(),Vo=r("ul"),no=r("li"),fo=r("p"),Dl=i("Baixando arquivos programaticamente com a biblioteca "),po=r("a"),Hl=i("huggingface_hub"),Ll=i(":"),zl=p(),mo=r("ol"),co=r("li"),uo=r("p"),Ul=i("Instale a biblioteca "),ho=r("a"),Bl=i("huggingface_hub"),Vl=i(" em seu ambiente virtual:"),Gl=p(),h(vo.$$.fragment),Yl=p(),_o=r("li"),M=r("p"),Ql=i("Utiliza a fun\xE7\xE3o "),$o=r("a"),Ua=r("code"),Wl=i("hf_hub_download"),Xl=i(" para baixar um arquivo para um caminho espec\xEDfico. 
Por exemplo, o comando a seguir baixar\xE1 o arquivo "),Ba=r("code"),Jl=i("config.json"),Kl=i(" para o modelo "),go=r("a"),Zl=i("T0"),ei=i(" no caminho desejado:"),oi=p(),h(Eo.$$.fragment),Xr=p(),Go=r("p"),ai=i("Depois que o arquivo for baixado e armazenado no cach\xEA local, especifique seu caminho local para carreg\xE1-lo e us\xE1-lo:"),Jr=p(),h(bo.$$.fragment),Kr=p(),h(ue.$$.fragment),this.h()},l(e){const l=Ln('[data-svelte="svelte-1phssyn"]',document.head);c=t(l,"META",{name:!0,content:!0}),l.forEach(o),y=m(e),u=t(e,"H1",{class:!0});var wo=s(u);P=t(wo,"A",{id:!0,class:!0,href:!0});var Va=s(P);T=t(Va,"SPAN",{});var Ga=s(T);v(b.$$.fragment,Ga),Ga.forEach(o),Va.forEach(o),w=m(wo),A=t(wo,"SPAN",{});var Ya=s(A);C=n(Ya,"Guia de Instala\xE7\xE3o"),Ya.forEach(o),wo.forEach(o),O=m(e),S=t(e,"P",{});var ti=s(S);j=n(ti,`Neste guia poder\xE1 encontrar informa\xE7\xF5es para a instala\xE7\xE3o do \u{1F917} Transformers para qualquer biblioteca de Machine Learning com a qual esteja a trabalhar. Al\xE9m disso, poder\xE1 encontrar informa\xE7\xF5es sobre como gerar cach\xEAs e configurar o \u{1F917} Transformers para execu\xE7\xE3o em modo offline (opcional).`),ti.forEach(o),k=m(e),z=t(e,"P",{});var si=s(z);Po=n(si,`\u{1F917} Transformers foi testado com Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, e Flax. 
Para instalar a biblioteca de deep learning com que deseja trabalhar, siga as instru\xE7\xF5es correspondentes listadas a seguir:`),si.forEach(o),$e=m(e),N=t(e,"UL",{});var Yo=s(N);ea=t(Yo,"LI",{});var li=s(ea);ge=t(li,"A",{href:!0,rel:!0});var ii=s(ge);Ct=n(ii,"PyTorch"),ii.forEach(o),li.forEach(o),kt=m(Yo),oa=t(Yo,"LI",{});var ni=s(oa);Ee=t(ni,"A",{href:!0,rel:!0});var fi=s(Ee);St=n(fi,"TensorFlow 2.0"),fi.forEach(o),ni.forEach(o),jt=m(Yo),aa=t(Yo,"LI",{});var pi=s(aa);be=t(pi,"A",{href:!0,rel:!0});var mi=s(be);It=n(mi,"Flax"),mi.forEach(o),pi.forEach(o),Yo.forEach(o),Qa=m(e),U=t(e,"H2",{class:!0});var et=s(U);ee=t(et,"A",{id:!0,class:!0,href:!0});var di=s(ee);ra=t(di,"SPAN",{});var ci=s(ra);v(we.$$.fragment,ci),ci.forEach(o),di.forEach(o),Ft=m(et),ta=t(et,"SPAN",{});var ui=s(ta);Mt=n(ui,"Instala\xE7\xE3o pelo Pip"),ui.forEach(o),et.forEach(o),Wa=m(e),D=t(e,"P",{});var Qo=s(D);Rt=n(Qo,"\xC9 sugerido instalar o \u{1F917} Transformers num "),Pe=t(Qo,"A",{href:!0,rel:!0});var hi=s(Pe);Nt=n(hi,"ambiente virtual"),hi.forEach(o),Dt=n(Qo,`. Se precisar de mais informa\xE7\xF5es sobre ambientes virtuais em Python, consulte este `),Te=t(Qo,"A",{href:!0,rel:!0});var vi=s(Te);Ht=n(vi,"guia"),vi.forEach(o),Lt=n(Qo,`. 
Um ambiente virtual facilitar\xE1 a manipula\xE7\xE3o e organiza\xE7\xE3o de projetos e evita problemas de compatibilidade entre depend\xEAncias.`),Qo.forEach(o),Xa=m(e),To=t(e,"P",{});var _i=s(To);zt=n(_i,"Comece criando um ambiente virtual no diret\xF3rio do seu projeto:"),_i.forEach(o),Ja=m(e),v(qe.$$.fragment,e),Ka=m(e),qo=t(e,"P",{});var $i=s(qo);Ut=n($i,"E para ativar o ambiente virtual:"),$i.forEach(o),Za=m(e),v(ye.$$.fragment,e),er=m(e),yo=t(e,"P",{});var gi=s(yo);Bt=n(gi,"Agora \xC9 poss\xEDvel instalar o \u{1F917} Transformers com o comando a seguir:"),gi.forEach(o),or=m(e),v(Ae.$$.fragment,e),ar=m(e),Ao=t(e,"P",{});var Ei=s(Ao);Vt=n(Ei,"Somente para a CPU, \xE9 poss\xEDvel instalar o \u{1F917} Transformers e a biblioteca de deep learning respectiva apenas numa linha."),Ei.forEach(o),rr=m(e),Oo=t(e,"P",{});var bi=s(Oo);Gt=n(bi,"Por exemplo, para instalar o \u{1F917} Transformers e o PyTorch, digite:"),bi.forEach(o),tr=m(e),v(Oe.$$.fragment,e),sr=m(e),xo=t(e,"P",{});var wi=s(xo);Yt=n(wi,"\u{1F917} Transformers e TensorFlow 2.0:"),wi.forEach(o),lr=m(e),v(xe.$$.fragment,e),ir=m(e),Co=t(e,"P",{});var Pi=s(Co);Qt=n(Pi,"\u{1F917} Transformers e Flax:"),Pi.forEach(o),nr=m(e),v(Ce.$$.fragment,e),fr=m(e),ko=t(e,"P",{});var Ti=s(ko);Wt=n(Ti,"Por \xFAltimo, verifique se o \u{1F917} Transformers foi instalado com sucesso usando o seguinte comando para baixar um modelo pr\xE9-treinado:"),Ti.forEach(o),pr=m(e),v(ke.$$.fragment,e),mr=m(e),So=t(e,"P",{});var qi=s(So);Xt=n(qi,"Em seguida, imprima um r\xF3tulo e sua pontua\xE7\xE3o:"),qi.forEach(o),dr=m(e),v(Se.$$.fragment,e),cr=m(e),B=t(e,"H2",{class:!0});var ot=s(B);oe=t(ot,"A",{id:!0,class:!0,href:!0});var yi=s(oe);sa=t(yi,"SPAN",{});var Ai=s(sa);v(je.$$.fragment,Ai),Ai.forEach(o),yi.forEach(o),Jt=m(ot),la=t(ot,"SPAN",{});var Oi=s(la);Kt=n(Oi,"Instala\xE7\xE3o usando a fonte"),Oi.forEach(o),ot.forEach(o),ur=m(e),jo=t(e,"P",{});var xi=s(jo);Zt=n(xi,"Para instalar o \u{1F917} Transformers a partir da fonte use o seguinte 
comando:"),xi.forEach(o),hr=m(e),v(Ie.$$.fragment,e),vr=m(e),x=t(e,"P",{});var F=s(x);es=n(F,"O comando acima instalar\xE1 a vers\xE3o "),ia=t(F,"CODE",{});var Ci=s(ia);os=n(Ci,"master"),Ci.forEach(o),as=n(F," mais atual em vez da \xFAltima vers\xE3o est\xE1vel. A vers\xE3o "),na=t(F,"CODE",{});var ki=s(na);rs=n(ki,"master"),ki.forEach(o),ts=n(F,` \xE9 \xFAtil para utilizar os \xFAltimos updates contidos em \u{1F917} Transformers. Por exemplo, um erro recente pode ter sido corrigido somente ap\xF3s a \xFAltima vers\xE3o est\xE1vel, antes que houvesse um novo lan\xE7amento. No entanto, h\xE1 a possibilidade que a vers\xE3o `),fa=t(F,"CODE",{});var Si=s(fa);ss=n(Si,"master"),Si.forEach(o),ls=n(F,` n\xE3o esteja est\xE1vel. A equipa trata de mant\xE9r a vers\xE3o `),pa=t(F,"CODE",{});var ji=s(pa);is=n(ji,"master"),ji.forEach(o),ns=n(F,` operacional e a maioria dos erros s\xE3o resolvidos em poucas horas ou dias. Se encontrar quaisquer problemas, por favor abra um `),Fe=t(F,"A",{href:!0,rel:!0});var Ii=s(Fe);fs=n(Ii,"Issue"),Ii.forEach(o),ps=n(F,` para que o mesmo possa ser corrigido o mais r\xE1pido poss\xEDvel.`),F.forEach(o),_r=m(e),Io=t(e,"P",{});var Fi=s(Io);ms=n(Fi,"Verifique que o \u{1F917} Transformers est\xE1 instalado corretamente usando o seguinte comando:"),Fi.forEach(o),$r=m(e),v(Me.$$.fragment,e),gr=m(e),V=t(e,"H2",{class:!0});var at=s(V);ae=t(at,"A",{id:!0,class:!0,href:!0});var Mi=s(ae);ma=t(Mi,"SPAN",{});var Ri=s(ma);v(Re.$$.fragment,Ri),Ri.forEach(o),Mi.forEach(o),ds=m(at),da=t(at,"SPAN",{});var Ni=s(da);cs=n(Ni,"Instala\xE7\xE3o edit\xE1vel"),Ni.forEach(o),at.forEach(o),Er=m(e),Fo=t(e,"P",{});var Di=s(Fo);us=n(Di,"Uma instala\xE7\xE3o edit\xE1vel ser\xE1 necess\xE1ria caso desejas um dos seguintes:"),Di.forEach(o),br=m(e),re=t(e,"UL",{});var rt=s(re);Ne=t(rt,"LI",{});var tt=s(Ne);hs=n(tt,"Usar a vers\xE3o "),ca=t(tt,"CODE",{});var Hi=s(ca);vs=n(Hi,"master"),Hi.forEach(o),_s=n(tt," do c\xF3digo fonte."),tt.forEach(o),$s=m(rt),ua=t(rt,"LI",{});var 
Li=s(ua);gs=n(Li,"Contribuir ao \u{1F917} Transformers e precisa testar mudan\xE7as ao c\xF3digo."),Li.forEach(o),rt.forEach(o),wr=m(e),Mo=t(e,"P",{});var zi=s(Mo);Es=n(zi,"Para tal, clone o reposit\xF3rio e instale o \u{1F917} Transformers com os seguintes comandos:"),zi.forEach(o),Pr=m(e),v(De.$$.fragment,e),Tr=m(e),H=t(e,"P",{});var Wo=s(H);bs=n(Wo,`Estes comandos v\xE3o ligar o diret\xF3rio para o qual foi clonado o reposit\xF3rio ao caminho de bibliotecas do Python. O Python agora buscar\xE1 dentro dos arquivos que foram clonados al\xE9m dos caminhos normais da biblioteca. Por exemplo, se os pacotes do Python se encontram instalados no caminho `),ha=t(Wo,"CODE",{});var Ui=s(ha);ws=n(Ui,"~/anaconda3/envs/main/lib/python3.7/site-packages/"),Ui.forEach(o),Ps=n(Wo,`, o Python tamb\xE9m buscar\xE1 m\xF3dulos no diret\xF3rio onde clonamos o reposit\xF3rio `),va=t(Wo,"CODE",{});var Bi=s(va);Ts=n(Bi,"~/transformers/"),Bi.forEach(o),qs=n(Wo,"."),Wo.forEach(o),qr=m(e),v(te.$$.fragment,e),yr=m(e),Ro=t(e,"P",{});var Vi=s(Ro);ys=n(Vi,"Assim, \xC9 poss\xEDvel atualizar sua c\xF3pia local para com a \xFAltima vers\xE3o do \u{1F917} Transformers com o seguinte comando:"),Vi.forEach(o),Ar=m(e),v(He.$$.fragment,e),Or=m(e),se=t(e,"P",{});var st=s(se);As=n(st,"O ambiente de Python que foi criado para a instala\xE7\xE3o do \u{1F917} Transformers encontrar\xE1 a vers\xE3o "),_a=t(st,"CODE",{});var Gi=s(_a);Os=n(Gi,"master"),Gi.forEach(o),xs=n(st," em execu\xE7\xF5es seguintes."),st.forEach(o),xr=m(e),G=t(e,"H2",{class:!0});var lt=s(G);le=t(lt,"A",{id:!0,class:!0,href:!0});var Yi=s(le);$a=t(Yi,"SPAN",{});var Qi=s($a);v(Le.$$.fragment,Qi),Qi.forEach(o),Yi.forEach(o),Cs=m(lt),ga=t(lt,"SPAN",{});var Wi=s(ga);ks=n(Wi,"Instala\xE7\xE3o usando o Conda"),Wi.forEach(o),lt.forEach(o),Cr=m(e),ie=t(e,"P",{});var it=s(ie);Ss=n(it,"\xC9 poss\xEDvel instalar o \u{1F917} Transformers a partir do canal conda "),Ea=t(it,"CODE",{});var Xi=s(Ea);js=n(Xi,"huggingface"),Xi.forEach(o),Is=n(it," com o 
seguinte comando:"),it.forEach(o),kr=m(e),v(ze.$$.fragment,e),Sr=m(e),Y=t(e,"H2",{class:!0});var nt=s(Y);ne=t(nt,"A",{id:!0,class:!0,href:!0});var Ji=s(ne);ba=t(Ji,"SPAN",{});var Ki=s(ba);v(Ue.$$.fragment,Ki),Ki.forEach(o),Ji.forEach(o),Fs=m(nt),wa=t(nt,"SPAN",{});var Zi=s(wa);Ms=n(Zi,"Configura\xE7\xE3o do Cach\xEA"),Zi.forEach(o),nt.forEach(o),jr=m(e),I=t(e,"P",{});var he=s(I);Rs=n(he,"Os modelos pr\xE9-treinados s\xE3o baixados e armazenados no cach\xEA local, encontrado em "),Pa=t(he,"CODE",{});var en=s(Pa);Ns=n(en,"~/.cache/huggingface/transformers/"),en.forEach(o),Ds=n(he,`. Este \xE9 o diret\xF3rio padr\xE3o determinado pela vari\xE1vel `),Ta=t(he,"CODE",{});var on=s(Ta);Hs=n(on,"TRANSFORMERS_CACHE"),on.forEach(o),Ls=n(he,` dentro do shell. No Windows, este diret\xF3rio pr\xE9-definido \xE9 dado por `),qa=t(he,"CODE",{});var an=s(qa);zs=n(an,"C:\\Users\\username\\.cache\\huggingface\\transformers"),an.forEach(o),Us=n(he,`. \xC9 poss\xEDvel mudar as vari\xE1veis dentro do shell em ordem de prioridade para especificar um diret\xF3rio de cach\xEA diferente:`),he.forEach(o),Ir=m(e),L=t(e,"OL",{});var Xo=s(L);Be=t(Xo,"LI",{});var ft=s(Be);Bs=n(ft,"Vari\xE1vel de ambiente do shell (por padr\xE3o): "),ya=t(ft,"CODE",{});var rn=s(ya);Vs=n(rn,"TRANSFORMERS_CACHE"),rn.forEach(o),Gs=n(ft,"."),ft.forEach(o),Ys=m(Xo),Q=t(Xo,"LI",{});var Jo=s(Q);Qs=n(Jo,"Vari\xE1vel de ambiente do shell:"),Aa=t(Jo,"CODE",{});var tn=s(Aa);Ws=n(tn,"HF_HOME"),tn.forEach(o),Xs=n(Jo," + "),Oa=t(Jo,"CODE",{});var sn=s(Oa);Js=n(sn,"transformers/"),sn.forEach(o),Ks=n(Jo,"."),Jo.forEach(o),Zs=m(Xo),W=t(Xo,"LI",{});var Ko=s(W);el=n(Ko,"Vari\xE1vel de ambiente do shell: "),xa=t(Ko,"CODE",{});var ln=s(xa);ol=n(ln,"XDG_CACHE_HOME"),ln.forEach(o),al=n(Ko," + "),Ca=t(Ko,"CODE",{});var nn=s(Ca);rl=n(nn,"/huggingface/transformers"),nn.forEach(o),tl=n(Ko,"."),Ko.forEach(o),Xo.forEach(o),Fr=m(e),v(fe.$$.fragment,e),Mr=m(e),X=t(e,"H2",{class:!0});var pt=s(X);pe=t(pt,"A",{id:!0,class:!0,href:!0});var 
fn=s(pe);ka=t(fn,"SPAN",{});var pn=s(ka);v(Ve.$$.fragment,pn),pn.forEach(o),fn.forEach(o),sl=m(pt),Sa=t(pt,"SPAN",{});var mn=s(Sa);ll=n(mn,"Modo Offline"),mn.forEach(o),pt.forEach(o),Rr=m(e),me=t(e,"P",{});var mt=s(me);il=n(mt,`O \u{1F917} Transformers tamb\xE9m pode ser executado num ambiente de firewall ou fora da rede (offline) usando arquivos locais. Para tal, configure a vari\xE1vel de ambiente de modo que `),ja=t(mt,"CODE",{});var dn=s(ja);nl=n(dn,"TRANSFORMERS_OFFLINE=1"),dn.forEach(o),fl=n(mt,"."),mt.forEach(o),Nr=m(e),v(de.$$.fragment,e),Dr=m(e),No=t(e,"P",{});var cn=s(No);pl=n(cn,"Segue um exemplo de execu\xE7\xE3o do programa numa rede padr\xE3o com firewall para inst\xE2ncias externas, usando o seguinte comando:"),cn.forEach(o),Hr=m(e),v(Ge.$$.fragment,e),Lr=m(e),Do=t(e,"P",{});var un=s(Do);ml=n(un,"Execute esse mesmo programa numa inst\xE2ncia offline com o seguinte comando:"),un.forEach(o),zr=m(e),v(Ye.$$.fragment,e),Ur=m(e),Ho=t(e,"P",{});var hn=s(Ho);dl=n(hn,"O script agora deve ser executado sem travar ou expirar, pois procurar\xE1 apenas por arquivos locais."),hn.forEach(o),Br=m(e),J=t(e,"H3",{class:!0});var dt=s(J);ce=t(dt,"A",{id:!0,class:!0,href:!0});var vn=s(ce);Ia=t(vn,"SPAN",{});var _n=s(Ia);v(Qe.$$.fragment,_n),_n.forEach(o),vn.forEach(o),cl=m(dt),Fa=t(dt,"SPAN",{});var $n=s(Fa);ul=n($n,"Obtendo modelos e tokenizers para uso offline"),$n.forEach(o),dt.forEach(o),Vr=m(e),Lo=t(e,"P",{});var gn=s(Lo);hl=n(gn,"Outra op\xE7\xE3o para usar o \u{1F917} Transformers offline \xE9 baixar os arquivos antes e depois apontar para o caminho local onde est\xE3o localizados. 
Existem tr\xEAs maneiras de fazer isso:"),gn.forEach(o),Gr=m(e),zo=t(e,"UL",{});var En=s(zo);We=t(En,"LI",{});var ct=s(We);Xe=t(ct,"P",{});var ut=s(Xe);vl=n(ut,"Baixe um arquivo por meio da interface de usu\xE1rio do "),Je=t(ut,"A",{href:!0,rel:!0});var bn=s(Je);_l=n(bn,"Model Hub"),bn.forEach(o),$l=n(ut," clicando no \xEDcone \u2193."),ut.forEach(o),gl=m(ct),Ma=t(ct,"P",{});var wn=s(Ma);Uo=t(wn,"IMG",{src:!0,alt:!0}),wn.forEach(o),ct.forEach(o),En.forEach(o),Yr=m(e),Bo=t(e,"UL",{});var Pn=s(Bo);Ke=t(Pn,"LI",{});var ht=s(Ke);K=t(ht,"P",{});var Zo=s(K);El=n(Zo,"Use o pipeline do "),Ra=t(Zo,"CODE",{});var Tn=s(Ra);bl=n(Tn,"PreTrainedModel.from_pretrained()"),Tn.forEach(o),wl=n(Zo," e "),Na=t(Zo,"CODE",{});var qn=s(Na);Pl=n(qn,"PreTrainedModel.save_pretrained()"),qn.forEach(o),Tl=n(Zo,":"),Zo.forEach(o),ql=m(ht),Da=t(ht,"OL",{});var yn=s(Da);Ze=t(yn,"LI",{});var vt=s(Ze);eo=t(vt,"P",{});var _t=s(eo);yl=n(_t,"Baixa os arquivos previamente com "),Ha=t(_t,"CODE",{});var An=s(Ha);Al=n(An,"PreTrainedModel.from_pretrained()"),An.forEach(o),Ol=n(_t,":"),_t.forEach(o),xl=m(vt),v(oo.$$.fragment,vt),vt.forEach(o),yn.forEach(o),ht.forEach(o),Pn.forEach(o),Qr=m(e),Z=t(e,"OL",{start:!0});var $t=s(Z);ao=t($t,"LI",{});var gt=s(ao);ro=t(gt,"P",{});var Et=s(ro);Cl=n(Et,"Salve os arquivos em um diret\xF3rio espec\xEDfico com "),La=t(Et,"CODE",{});var On=s(La);kl=n(On,"PreTrainedModel.save_pretrained()"),On.forEach(o),Sl=n(Et,":"),Et.forEach(o),jl=m(gt),v(to.$$.fragment,gt),gt.forEach(o),Il=m($t),so=t($t,"LI",{});var bt=s(so);lo=t(bt,"P",{});var wt=s(lo);Fl=n(wt,"Quando estiver offline, acesse os arquivos com "),za=t(wt,"CODE",{});var xn=s(za);Ml=n(xn,"PreTrainedModel.from_pretrained()"),xn.forEach(o),Rl=n(wt," do diret\xF3rio especificado:"),wt.forEach(o),Nl=m(bt),v(io.$$.fragment,bt),bt.forEach(o),$t.forEach(o),Wr=m(e),Vo=t(e,"UL",{});var Cn=s(Vo);no=t(Cn,"LI",{});var Pt=s(no);fo=t(Pt,"P",{});var Tt=s(fo);Dl=n(Tt,"Baixando arquivos programaticamente com a biblioteca 
"),po=t(Tt,"A",{href:!0,rel:!0});var kn=s(po);Hl=n(kn,"huggingface_hub"),kn.forEach(o),Ll=n(Tt,":"),Tt.forEach(o),zl=m(Pt),mo=t(Pt,"OL",{});var qt=s(mo);co=t(qt,"LI",{});var yt=s(co);uo=t(yt,"P",{});var At=s(uo);Ul=n(At,"Instale a biblioteca "),ho=t(At,"A",{href:!0,rel:!0});var Sn=s(ho);Bl=n(Sn,"huggingface_hub"),Sn.forEach(o),Vl=n(At," em seu ambiente virtual:"),At.forEach(o),Gl=m(yt),v(vo.$$.fragment,yt),yt.forEach(o),Yl=m(qt),_o=t(qt,"LI",{});var Ot=s(_o);M=t(Ot,"P",{});var ve=s(M);Ql=n(ve,"Utiliza a fun\xE7\xE3o "),$o=t(ve,"A",{href:!0,rel:!0});var jn=s($o);Ua=t(jn,"CODE",{});var In=s(Ua);Wl=n(In,"hf_hub_download"),In.forEach(o),jn.forEach(o),Xl=n(ve," para baixar um arquivo para um caminho espec\xEDfico. Por exemplo, o comando a seguir baixar\xE1 o arquivo "),Ba=t(ve,"CODE",{});var Fn=s(Ba);Jl=n(Fn,"config.json"),Fn.forEach(o),Kl=n(ve," para o modelo "),go=t(ve,"A",{href:!0,rel:!0});var Mn=s(go);Zl=n(Mn,"T0"),Mn.forEach(o),ei=n(ve," no caminho desejado:"),ve.forEach(o),oi=m(Ot),v(Eo.$$.fragment,Ot),Ot.forEach(o),qt.forEach(o),Pt.forEach(o),Cn.forEach(o),Xr=m(e),Go=t(e,"P",{});var Rn=s(Go);ai=n(Rn,"Depois que o arquivo for baixado e armazenado no cach\xEA local, especifique seu caminho local para carreg\xE1-lo e us\xE1-lo:"),Rn.forEach(o),Jr=m(e),v(bo.$$.fragment,e),Kr=m(e),v(ue.$$.fragment,e),this.h()},h(){d(c,"name","hf:doc:metadata"),d(c,"content",JSON.stringify(Wn)),d(P,"id","guia-de-instalao"),d(P,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(P,"href","#guia-de-instalao"),d(u,"class","relative group"),d(ge,"href","https://pytorch.org/get-started/locally/"),d(ge,"rel","nofollow"),d(Ee,"href","https://www.tensorflow.org/install/pip"),d(Ee,"rel","nofollow"),d(be,"href","https://flax.readthedocs.io/en/latest/"),d(be,"rel","nofollow"),d(ee,"id","instalao-pelo-pip"),d(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ee,"href","#instalao-pelo-pip"),d(U,"class","relative group"),d(Pe,"href","https://docs.python.org/3/library/venv.html"),d(Pe,"rel","nofollow"),d(Te,"href","https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/"),d(Te,"rel","nofollow"),d(oe,"id","instalao-usando-a-fonte"),d(oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(oe,"href","#instalao-usando-a-fonte"),d(B,"class","relative group"),d(Fe,"href","https://github.com/huggingface/transformers/issues"),d(Fe,"rel","nofollow"),d(ae,"id","instalao-editvel"),d(ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ae,"href","#instalao-editvel"),d(V,"class","relative group"),d(le,"id","instalao-usando-o-conda"),d(le,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(le,"href","#instalao-usando-o-conda"),d(G,"class","relative group"),d(ne,"id","configurao-do-cach"),d(ne,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ne,"href","#configurao-do-cach"),d(Y,"class","relative group"),d(pe,"id","modo-offline"),d(pe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(pe,"href","#modo-offline"),d(X,"class","relative group"),d(ce,"id","obtendo-modelos-e-tokenizers-para-uso-offline"),d(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ce,"href","#obtendo-modelos-e-tokenizers-para-uso-offline"),d(J,"class","relative group"),d(Je,"href","https://huggingface.co/models"),d(Je,"rel","nofollow"),zn(Uo.src,ri="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png")||d(Uo,"src",ri),d(Uo,"alt","download-icon"),d(Z,"start","2"),d(po,"href","https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub"),d(po,"rel","nofollow"),d(ho,"href","https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub"),d(ho,"rel","nofollow"),d($o,"href","https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub"),d($o,"rel","nofollow"),d(go,"href","https://huggingface.co/bigscience/T0_3B"),d(go,"rel","nofollow")},m(e,l){a(document.head,c),f(e,y,l),f(e,u,l),a(u,P),a(P,T),_(b,T,null),a(u,w),a(u,A),a(A,C),f(e,O,l),f(e,S,l),a(S,j),f(e,k,l),f(e,z,l),a(z,Po),f(e,$e,l),f(e,N,l),a(N,ea),a(ea,ge),a(ge,Ct),a(N,kt),a(N,oa),a(oa,Ee),a(Ee,St),a(N,jt),a(N,aa),a(aa,be),a(be,It),f(e,Qa,l),f(e,U,l),a(U,ee),a(ee,ra),_(we,ra,null),a(U,Ft),a(U,ta),a(ta,Mt),f(e,Wa,l),f(e,D,l),a(D,Rt),a(D,Pe),a(Pe,Nt),a(D,Dt),a(D,Te),a(Te,Ht),a(D,Lt),f(e,Xa,l),f(e,To,l),a(To,zt),f(e,Ja,l),_(qe,e,l),f(e,Ka,l),f(e,qo,l),a(qo,Ut),f(e,Za,l),_(ye,e,l),f(e,er,l),f(e,yo,l),a(yo,Bt),f(e,or,l),_(Ae,e,l),f(e,ar,l),f(e,Ao,l),a(Ao,Vt),f(e,rr,l),f(e,Oo,l),a(Oo,Gt),f(e,tr,l),_(Oe,e,l),f(e,sr,l),f(e,xo,l),a(xo,Yt),f(e,lr,l),_(xe,e,l),f(e,ir,l),f(e,Co,l),a(Co,Qt),f(e,nr,l),_(Ce,e,l),f(e,fr,l),f(e,ko,l),a(ko,Wt),f(e,pr,l),_(ke,e,l),f(e,mr,l),f(e,So,l),a(So,Xt),f(e,dr,l),_(Se,e,l),f(e,cr,l),f(e,B,l),a(B,oe),a(oe,sa),_(je,sa,null),a(B,Jt),a(B,la),a(la,Kt),f(e,ur,l),f(e,jo,l),a(jo,Zt),f(e,hr,l),_(Ie,e,l),f(e,vr,l),f(e,x,l),a(x,es),a(x,ia),a(ia,os),a(x,as),a(x,na),a(na,rs),a(x,ts),a(x,fa),a(fa,ss),a(x,ls),a(x,pa),a(pa,is),a(x,ns),a(x,Fe),a(Fe,fs),a(x,ps),f(e,_r,l),f(e,Io,l),a(
Io,ms),f(e,$r,l),_(Me,e,l),f(e,gr,l),f(e,V,l),a(V,ae),a(ae,ma),_(Re,ma,null),a(V,ds),a(V,da),a(da,cs),f(e,Er,l),f(e,Fo,l),a(Fo,us),f(e,br,l),f(e,re,l),a(re,Ne),a(Ne,hs),a(Ne,ca),a(ca,vs),a(Ne,_s),a(re,$s),a(re,ua),a(ua,gs),f(e,wr,l),f(e,Mo,l),a(Mo,Es),f(e,Pr,l),_(De,e,l),f(e,Tr,l),f(e,H,l),a(H,bs),a(H,ha),a(ha,ws),a(H,Ps),a(H,va),a(va,Ts),a(H,qs),f(e,qr,l),_(te,e,l),f(e,yr,l),f(e,Ro,l),a(Ro,ys),f(e,Ar,l),_(He,e,l),f(e,Or,l),f(e,se,l),a(se,As),a(se,_a),a(_a,Os),a(se,xs),f(e,xr,l),f(e,G,l),a(G,le),a(le,$a),_(Le,$a,null),a(G,Cs),a(G,ga),a(ga,ks),f(e,Cr,l),f(e,ie,l),a(ie,Ss),a(ie,Ea),a(Ea,js),a(ie,Is),f(e,kr,l),_(ze,e,l),f(e,Sr,l),f(e,Y,l),a(Y,ne),a(ne,ba),_(Ue,ba,null),a(Y,Fs),a(Y,wa),a(wa,Ms),f(e,jr,l),f(e,I,l),a(I,Rs),a(I,Pa),a(Pa,Ns),a(I,Ds),a(I,Ta),a(Ta,Hs),a(I,Ls),a(I,qa),a(qa,zs),a(I,Us),f(e,Ir,l),f(e,L,l),a(L,Be),a(Be,Bs),a(Be,ya),a(ya,Vs),a(Be,Gs),a(L,Ys),a(L,Q),a(Q,Qs),a(Q,Aa),a(Aa,Ws),a(Q,Xs),a(Q,Oa),a(Oa,Js),a(Q,Ks),a(L,Zs),a(L,W),a(W,el),a(W,xa),a(xa,ol),a(W,al),a(W,Ca),a(Ca,rl),a(W,tl),f(e,Fr,l),_(fe,e,l),f(e,Mr,l),f(e,X,l),a(X,pe),a(pe,ka),_(Ve,ka,null),a(X,sl),a(X,Sa),a(Sa,ll),f(e,Rr,l),f(e,me,l),a(me,il),a(me,ja),a(ja,nl),a(me,fl),f(e,Nr,l),_(de,e,l),f(e,Dr,l),f(e,No,l),a(No,pl),f(e,Hr,l),_(Ge,e,l),f(e,Lr,l),f(e,Do,l),a(Do,ml),f(e,zr,l),_(Ye,e,l),f(e,Ur,l),f(e,Ho,l),a(Ho,dl),f(e,Br,l),f(e,J,l),a(J,ce),a(ce,Ia),_(Qe,Ia,null),a(J,cl),a(J,Fa),a(Fa,ul),f(e,Vr,l),f(e,Lo,l),a(Lo,hl),f(e,Gr,l),f(e,zo,l),a(zo,We),a(We,Xe),a(Xe,vl),a(Xe,Je),a(Je,_l),a(Xe,$l),a(We,gl),a(We,Ma),a(Ma,Uo),f(e,Yr,l),f(e,Bo,l),a(Bo,Ke),a(Ke,K),a(K,El),a(K,Ra),a(Ra,bl),a(K,wl),a(K,Na),a(Na,Pl),a(K,Tl),a(Ke,ql),a(Ke,Da),a(Da,Ze),a(Ze,eo),a(eo,yl),a(eo,Ha),a(Ha,Al),a(eo,Ol),a(Ze,xl),_(oo,Ze,null),f(e,Qr,l),f(e,Z,l),a(Z,ao),a(ao,ro),a(ro,Cl),a(ro,La),a(La,kl),a(ro,Sl),a(ao,jl),_(to,ao,null),a(Z,Il),a(Z,so),a(so,lo),a(lo,Fl),a(lo,za),a(za,Ml),a(lo,Rl),a(so,Nl),_(io,so,null),f(e,Wr,l),f(e,Vo,l),a(Vo,no),a(no,fo),a(fo,Dl),a(fo,po),a(po,Hl),a(fo,Ll),a(no,zl),a(no,mo),a(mo,co),a(co,uo),a(uo,Ul
),a(uo,ho),a(ho,Bl),a(uo,Vl),a(co,Gl),_(vo,co,null),a(mo,Yl),a(mo,_o),a(_o,M),a(M,Ql),a(M,$o),a($o,Ua),a(Ua,Wl),a(M,Xl),a(M,Ba),a(Ba,Jl),a(M,Kl),a(M,go),a(go,Zl),a(M,ei),a(_o,oi),_(Eo,_o,null),f(e,Xr,l),f(e,Go,l),a(Go,ai),f(e,Jr,l),_(bo,e,l),f(e,Kr,l),_(ue,e,l),Zr=!0},p(e,[l]){const wo={};l&2&&(wo.$$scope={dirty:l,ctx:e}),te.$set(wo);const Va={};l&2&&(Va.$$scope={dirty:l,ctx:e}),fe.$set(Va);const Ga={};l&2&&(Ga.$$scope={dirty:l,ctx:e}),de.$set(Ga);const Ya={};l&2&&(Ya.$$scope={dirty:l,ctx:e}),ue.$set(Ya)},i(e){Zr||($(b.$$.fragment,e),$(we.$$.fragment,e),$(qe.$$.fragment,e),$(ye.$$.fragment,e),$(Ae.$$.fragment,e),$(Oe.$$.fragment,e),$(xe.$$.fragment,e),$(Ce.$$.fragment,e),$(ke.$$.fragment,e),$(Se.$$.fragment,e),$(je.$$.fragment,e),$(Ie.$$.fragment,e),$(Me.$$.fragment,e),$(Re.$$.fragment,e),$(De.$$.fragment,e),$(te.$$.fragment,e),$(He.$$.fragment,e),$(Le.$$.fragment,e),$(ze.$$.fragment,e),$(Ue.$$.fragment,e),$(fe.$$.fragment,e),$(Ve.$$.fragment,e),$(de.$$.fragment,e),$(Ge.$$.fragment,e),$(Ye.$$.fragment,e),$(Qe.$$.fragment,e),$(oo.$$.fragment,e),$(to.$$.fragment,e),$(io.$$.fragment,e),$(vo.$$.fragment,e),$(Eo.$$.fragment,e),$(bo.$$.fragment,e),$(ue.$$.fragment,e),Zr=!0)},o(e){g(b.$$.fragment,e),g(we.$$.fragment,e),g(qe.$$.fragment,e),g(ye.$$.fragment,e),g(Ae.$$.fragment,e),g(Oe.$$.fragment,e),g(xe.$$.fragment,e),g(Ce.$$.fragment,e),g(ke.$$.fragment,e),g(Se.$$.fragment,e),g(je.$$.fragment,e),g(Ie.$$.fragment,e),g(Me.$$.fragment,e),g(Re.$$.fragment,e),g(De.$$.fragment,e),g(te.$$.fragment,e),g(He.$$.fragment,e),g(Le.$$.fragment,e),g(ze.$$.fragment,e),g(Ue.$$.fragment,e),g(fe.$$.fragment,e),g(Ve.$$.fragment,e),g(de.$$.fragment,e),g(Ge.$$.fragment,e),g(Ye.$$.fragment,e),g(Qe.$$.fragment,e),g(oo.$$.fragment,e),g(to.$$.fragment,e),g(io.$$.fragment,e),g(vo.$$.fragment,e),g(Eo.$$.fragment,e),g(bo.$$.fragment,e),g(ue.$$.fragment,e),Zr=!1},d(e){o(c),e&&o(y),e&&o(u),E(b),e&&o(O),e&&o(S),e&&o(k),e&&o(z),e&&o($e),e&&o(N),e&&o(Qa),e&&o(U),E(we),e&&o(Wa),e&&o(D),e&&o(Xa),e&&o(To),e&&
o(Ja),E(qe,e),e&&o(Ka),e&&o(qo),e&&o(Za),E(ye,e),e&&o(er),e&&o(yo),e&&o(or),E(Ae,e),e&&o(ar),e&&o(Ao),e&&o(rr),e&&o(Oo),e&&o(tr),E(Oe,e),e&&o(sr),e&&o(xo),e&&o(lr),E(xe,e),e&&o(ir),e&&o(Co),e&&o(nr),E(Ce,e),e&&o(fr),e&&o(ko),e&&o(pr),E(ke,e),e&&o(mr),e&&o(So),e&&o(dr),E(Se,e),e&&o(cr),e&&o(B),E(je),e&&o(ur),e&&o(jo),e&&o(hr),E(Ie,e),e&&o(vr),e&&o(x),e&&o(_r),e&&o(Io),e&&o($r),E(Me,e),e&&o(gr),e&&o(V),E(Re),e&&o(Er),e&&o(Fo),e&&o(br),e&&o(re),e&&o(wr),e&&o(Mo),e&&o(Pr),E(De,e),e&&o(Tr),e&&o(H),e&&o(qr),E(te,e),e&&o(yr),e&&o(Ro),e&&o(Ar),E(He,e),e&&o(Or),e&&o(se),e&&o(xr),e&&o(G),E(Le),e&&o(Cr),e&&o(ie),e&&o(kr),E(ze,e),e&&o(Sr),e&&o(Y),E(Ue),e&&o(jr),e&&o(I),e&&o(Ir),e&&o(L),e&&o(Fr),E(fe,e),e&&o(Mr),e&&o(X),E(Ve),e&&o(Rr),e&&o(me),e&&o(Nr),E(de,e),e&&o(Dr),e&&o(No),e&&o(Hr),E(Ge,e),e&&o(Lr),e&&o(Do),e&&o(zr),E(Ye,e),e&&o(Ur),e&&o(Ho),e&&o(Br),e&&o(J),E(Qe),e&&o(Vr),e&&o(Lo),e&&o(Gr),e&&o(zo),e&&o(Yr),e&&o(Bo),E(oo),e&&o(Qr),e&&o(Z),E(to),E(io),e&&o(Wr),e&&o(Vo),E(vo),E(Eo),e&&o(Xr),e&&o(Go),e&&o(Jr),E(bo,e),e&&o(Kr),E(ue,e)}}}const Wn={local:"guia-de-instalao",sections:[{local:"instalao-pelo-pip",title:"Instala\xE7\xE3o pelo Pip"},{local:"instalao-usando-a-fonte",title:"Instala\xE7\xE3o usando a fonte"},{local:"instalao-editvel",title:"Instala\xE7\xE3o edit\xE1vel"},{local:"instalao-usando-o-conda",title:"Instala\xE7\xE3o usando o Conda"},{local:"configurao-do-cach",title:"Configura\xE7\xE3o do Cach\xEA"},{local:"modo-offline",sections:[{local:"obtendo-modelos-e-tokenizers-para-uso-offline",title:"Obtendo modelos e tokenizers para uso offline"}],title:"Modo Offline"}],title:"Guia de Instala\xE7\xE3o"};function Xn(R){return Un(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class of extends Nn{constructor(c){super();Dn(this,c,Xn,Qn,Hn,{})}}export{of as default,Wn as metadata};
480
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/index.mdx-hf-doc-builder.js
import{S as dve,i as sve,s as hve,e as r,k as d,w as mp,t as n,M as cve,c as a,d as t,m as s,a as l,x as pp,h as o,b as i,N as gve,f as qp,G as e,g as E,y as Dp,L as fve,q as Ap,o as Rp,B as yp,v as uve}from"../chunks/vendor-hf-doc-builder.js";import{I as bp}from"../chunks/IconCopyLink-hf-doc-builder.js";function vve(Mj){let Da,Lp,Aa,Ma,F3,Nn,jp,W3,Qp,Sp,$i,$p,wp,vt,J3,e6,t6,Y3,r6,a6,K3,l6,n6,V3,o6,Mp,Et,i6,kn,d6,s6,Cn,h6,c6,In,g6,f6,Pp,ed,u6,Gp,Ra,Pa,Z3,Bn,v6,X3,E6,Np,ya,ua,Pj,T6,kp,Cp,ba,Ga,z3,xn,_6,H3,m6,Ip,td,p6,Bp,T,U3,rd,q3,D6,A6,R6,j3,ad,Q3,y6,b6,L6,$3,ld,e5,S6,w6,M6,t5,nd,r5,P6,G6,N6,a5,od,l5,k6,C6,I6,n5,id,o5,B6,x6,O6,i5,dd,d5,F6,W6,J6,s5,sd,h5,Y6,K6,xp,hd,V6,Op,La,Na,c5,On,Z6,g5,X6,Fp,h,ka,f5,cd,z6,H6,Fn,U6,q6,j6,Ca,u5,gd,Q6,$6,Wn,e7,t7,r7,Ia,v5,fd,a7,l7,Jn,n7,o7,i7,Ba,E5,ud,d7,s7,Yn,h7,c7,g7,xa,T5,vd,f7,u7,Kn,v7,E7,T7,Oa,_5,Ed,_7,m7,Vn,p7,D7,A7,Fa,m5,Td,R7,y7,Zn,b7,L7,S7,Wa,p5,_d,w7,M7,Xn,P7,G7,N7,Ja,D5,md,k7,C7,zn,I7,B7,x7,Ya,A5,pd,O7,F7,Hn,W7,J7,Y7,Ka,R5,Dd,K7,V7,Un,Z7,X7,z7,Va,y5,Ad,H7,U7,qn,q7,j7,Q7,Za,b5,Rd,$7,e8,jn,t8,r8,a8,Xa,L5,yd,l8,n8,Qn,o8,i8,d8,va,S5,bd,s8,h8,$n,c8,g8,w5,f8,u8,v8,za,M5,Ld,E8,T8,eo,_8,m8,p8,Ha,P5,Sd,D8,A8,to,R8,y8,b8,Ua,G5,wd,L8,S8,ro,w8,M8,P8,qa,N5,Md,G8,N8,ao,k8,C8,I8,ja,k5,Pd,B8,x8,lo,O8,F8,W8,Ea,C5,Gd,J8,Y8,no,K8,V8,I5,Z8,X8,z8,Qa,B5,Nd,H8,U8,oo,q8,j8,Q8,$a,x5,kd,$8,e9,io,t9,r9,a9,el,O5,Cd,l9,n9,so,o9,i9,d9,tl,F5,Id,s9,h9,ho,c9,g9,f9,rl,W5,Bd,u9,v9,co,E9,T9,_9,al,J5,xd,m9,p9,go,D9,A9,R9,ll,Y5,Od,y9,b9,fo,L9,S9,w9,nl,K5,Fd,M9,P9,uo,G9,N9,k9,ut,V5,Wd,C9,I9,vo,B9,x9,Eo,O9,F9,To,W9,J9,_o,Y9,K9,V9,ol,Z5,Jd,Z9,X9,mo,z9,H9,U9,il,X5,Yd,q9,j9,po,Q9,$9,eD,dl,z5,Kd,tD,rD,Do,aD,lD,nD,sl,H5,Vd,oD,iD,Ao,dD,sD,hD,hl,U5,Zd,cD,gD,Ro,fD,uD,vD,cl,q5,Xd,ED,TD,yo,_D,mD,pD,gl,j5,zd,DD,AD,bo,RD,yD,bD,fl,Q5,Hd,LD,SD,Lo,wD,MD,PD,ul,$5,Ud,GD,ND,So,kD,CD,ID,Tt,e4,qd,BD,xD,wo,OD,FD,t4,WD,JD,r4,YD,KD,VD,vl,a4,jd,ZD,XD,Mo,zD,HD,UD,El,l4,Qd,qD,jD,Po,QD,$D,eA,Tl,n4,$d,tA,rA,Go,aA,lA,nA,_l,o4,es,oA,iA,No,dA,sA,hA,ml,i4,ts,cA,gA,ko,fA,uA,vA,pl,d4,rs,EA,TA,C
o,_A,mA,pA,Dl,s4,as,DA,AA,Io,RA,yA,bA,Al,h4,ls,LA,SA,Bo,wA,MA,PA,Rl,c4,ns,GA,NA,xo,kA,CA,IA,yl,g4,os,BA,xA,Oo,OA,FA,WA,bl,f4,is,JA,YA,Fo,KA,VA,ZA,Ll,u4,ds,XA,zA,Wo,HA,UA,qA,Sl,v4,ss,jA,QA,Jo,$A,eR,tR,wl,E4,hs,rR,aR,Yo,lR,nR,oR,Ta,T4,cs,iR,dR,Ko,sR,hR,Vo,cR,gR,fR,Ml,_4,gs,uR,vR,Zo,ER,TR,_R,Pl,m4,fs,mR,pR,Xo,DR,AR,RR,Gl,p4,us,yR,bR,zo,LR,SR,wR,Nl,D4,vs,MR,PR,Ho,GR,NR,kR,kl,A4,Es,CR,IR,Uo,BR,xR,OR,Cl,R4,Ts,FR,WR,qo,JR,YR,KR,Il,y4,_s,VR,ZR,jo,XR,zR,HR,Bl,b4,ms,UR,qR,Qo,jR,QR,$R,xl,L4,ps,ey,ty,$o,ry,ay,ly,Ol,S4,Ds,ny,oy,ei,iy,dy,sy,Fl,w4,As,hy,cy,ti,gy,fy,uy,Wl,M4,Rs,vy,Ey,ri,Ty,_y,my,Jl,P4,ys,py,Dy,ai,Ay,Ry,yy,Yl,G4,bs,by,Ly,li,Sy,wy,My,Kl,N4,Ls,Py,Gy,ni,Ny,ky,Cy,Vl,k4,Ss,Iy,By,oi,xy,Oy,Fy,Zl,C4,ws,Wy,Jy,ii,Yy,Ky,Vy,Xl,I4,Ms,Zy,Xy,di,zy,Hy,Uy,zl,B4,Ps,qy,jy,si,Qy,$y,eb,Hl,x4,Gs,tb,rb,hi,ab,lb,nb,Ul,O4,Ns,ob,ib,ci,db,sb,hb,ql,F4,ks,cb,gb,gi,fb,ub,vb,jl,W4,Cs,Eb,Tb,fi,_b,mb,pb,Ql,J4,Is,Db,Ab,ui,Rb,yb,bb,$l,Y4,Bs,Lb,Sb,vi,wb,Mb,Pb,en,K4,xs,Gb,Nb,Ei,kb,Cb,Ib,tn,V4,Os,Bb,xb,Ti,Ob,Fb,Wb,rn,Z4,Fs,Jb,Yb,_i,Kb,Vb,Zb,an,X4,Ws,Xb,zb,mi,Hb,Ub,qb,ln,z4,Js,jb,Qb,pi,$b,eL,tL,nn,H4,Ys,rL,aL,Di,lL,nL,oL,on,U4,Ks,iL,dL,Ai,sL,hL,cL,dn,q4,Vs,gL,fL,Ri,uL,vL,EL,sn,j4,Zs,TL,_L,yi,mL,pL,DL,_a,Q4,Xs,AL,RL,bi,yL,bL,$4,LL,SL,wL,hn,em,zs,ML,PL,Li,GL,NL,kL,cn,tm,Hs,CL,IL,Si,BL,xL,OL,gn,rm,Us,FL,WL,wi,JL,YL,KL,fn,am,qs,VL,ZL,Mi,XL,zL,HL,un,lm,js,UL,qL,Pi,jL,QL,$L,vn,nm,Qs,eS,tS,Gi,rS,aS,lS,En,om,$s,nS,oS,Ni,iS,dS,sS,Tn,im,eh,hS,cS,ki,gS,fS,uS,_n,dm,th,vS,ES,Ci,TS,_S,mS,mn,sm,rh,pS,DS,Ii,AS,RS,yS,pn,hm,ah,bS,LS,Bi,SS,wS,MS,Dn,cm,lh,PS,GS,xi,NS,kS,CS,An,gm,nh,IS,BS,Oi,xS,OS,FS,Rn,fm,oh,WS,JS,Fi,YS,KS,VS,ma,um,ih,ZS,XS,Wi,zS,HS,vm,US,qS,jS,yn,Em,dh,QS,$S,Ji,ew,tw,rw,pa,Tm,sh,aw,lw,Yi,nw,ow,_m,iw,dw,sw,bn,mm,hh,hw,cw,Ki,gw,fw,uw,Ln,pm,ch,vw,Ew,Vi,Tw,_w,mw,Sn,Dm,gh,pw,Dw,Zi,Aw,Rw,Wp,Sa,wn,Am,Xi,yw,Rm,bw,Jp,fh,Lw,Yp,Mn,ym,m,uh,Sw,ww,vh,Mw,Pw,Eh,Gw,Nw,Th,kw,Cw,_h,Iw,Bw,mh,xw,Ow,g,p,ph,Fw,Ww,Dh,Jw,Yw,Ah,Kw,Vw,Rh,Zw,Xw,yh,zw,Hw,bh,Uw,qw,D,Lh,jw,Qw,Sh,$w,eM,wh,tM,rM,Mh,aM,lM,Ph,nM,oM,Gh,iM,dM,A,Nh,sM,hM,kh,cM
,gM,Ch,fM,uM,Ih,vM,EM,Bh,TM,_M,xh,mM,pM,R,Oh,DM,AM,Fh,RM,yM,Wh,bM,LM,Jh,SM,wM,Yh,MM,PM,Kh,GM,NM,y,Vh,kM,CM,Zh,IM,BM,Xh,xM,OM,zh,FM,WM,Hh,JM,YM,Uh,KM,VM,b,qh,ZM,XM,jh,zM,HM,Qh,UM,qM,$h,jM,QM,ec,$M,eP,tc,tP,rP,L,rc,aP,lP,ac,nP,oP,lc,iP,dP,nc,sP,hP,oc,cP,gP,ic,fP,uP,S,dc,vP,EP,sc,TP,_P,hc,mP,pP,cc,DP,AP,gc,RP,yP,fc,bP,LP,w,uc,SP,wP,vc,MP,PP,Ec,GP,NP,Tc,kP,CP,_c,IP,BP,mc,xP,OP,M,pc,FP,WP,Dc,JP,YP,Ac,KP,VP,Rc,ZP,XP,yc,zP,HP,bc,UP,qP,P,Lc,jP,QP,Sc,$P,eG,wc,tG,rG,Mc,aG,lG,Pc,nG,oG,Gc,iG,dG,G,Nc,sG,hG,kc,cG,gG,Cc,fG,uG,Ic,vG,EG,Bc,TG,_G,xc,mG,pG,N,Oc,DG,AG,Fc,RG,yG,Wc,bG,LG,Jc,SG,wG,Yc,MG,PG,Kc,GG,NG,k,Vc,kG,CG,Zc,IG,BG,Xc,xG,OG,zc,FG,WG,Hc,JG,YG,Uc,KG,VG,C,qc,ZG,XG,jc,zG,HG,Qc,UG,qG,$c,jG,QG,eg,$G,eN,tg,tN,rN,I,rg,aN,lN,ag,nN,oN,lg,iN,dN,ng,sN,hN,og,cN,gN,ig,fN,uN,B,dg,vN,EN,sg,TN,_N,hg,mN,pN,cg,DN,AN,gg,RN,yN,fg,bN,LN,x,ug,SN,wN,vg,MN,PN,Eg,GN,NN,Tg,kN,CN,_g,IN,BN,mg,xN,ON,O,pg,FN,WN,Dg,JN,YN,Ag,KN,VN,Rg,ZN,XN,yg,zN,HN,bg,UN,qN,F,Lg,jN,QN,Sg,$N,ek,wg,tk,rk,Mg,ak,lk,Pg,nk,ok,Gg,ik,dk,W,Ng,sk,hk,kg,ck,gk,Cg,fk,uk,Ig,vk,Ek,Bg,Tk,_k,xg,mk,pk,J,Og,Dk,Ak,Fg,Rk,yk,Wg,bk,Lk,Jg,Sk,wk,Yg,Mk,Pk,Kg,Gk,Nk,Y,Vg,kk,Ck,Zg,Ik,Bk,Xg,xk,Ok,zg,Fk,Wk,Hg,Jk,Yk,Ug,Kk,Vk,K,qg,Zk,Xk,jg,zk,Hk,Qg,Uk,qk,$g,jk,Qk,e1,$k,eC,t1,tC,rC,V,r1,aC,lC,a1,nC,oC,l1,iC,dC,n1,sC,hC,o1,cC,gC,i1,fC,uC,Z,d1,vC,EC,s1,TC,_C,h1,mC,pC,c1,DC,AC,g1,RC,yC,f1,bC,LC,X,u1,SC,wC,v1,MC,PC,E1,GC,NC,T1,kC,CC,_1,IC,BC,m1,xC,OC,z,p1,FC,WC,D1,JC,YC,A1,KC,VC,R1,ZC,XC,y1,zC,HC,b1,UC,qC,H,L1,jC,QC,S1,$C,eI,w1,tI,rI,M1,aI,lI,P1,nI,oI,G1,iI,dI,U,N1,sI,hI,k1,cI,gI,C1,fI,uI,I1,vI,EI,B1,TI,_I,x1,mI,pI,q,O1,DI,AI,F1,RI,yI,W1,bI,LI,J1,SI,wI,Y1,MI,PI,K1,GI,NI,j,V1,kI,CI,Z1,II,BI,X1,xI,OI,z1,FI,WI,H1,JI,YI,U1,KI,VI,Q,q1,ZI,XI,j1,zI,HI,Q1,UI,qI,$1,jI,QI,ef,$I,eB,tf,tB,rB,$,rf,aB,lB,af,nB,oB,lf,iB,dB,nf,sB,hB,of,cB,gB,df,fB,uB,ee,sf,vB,EB,hf,TB,_B,cf,mB,pB,gf,DB,AB,ff,RB,yB,uf,bB,LB,te,vf,SB,wB,Ef,MB,PB,Tf,GB,NB,_f,kB,CB,mf,IB,BB,pf,xB,OB,re,Df,FB,WB,Af,JB,YB,Rf,KB,VB,yf,ZB,XB,bf,zB,HB,Lf,UB,qB,ae,Sf,jB,QB,wf,$B,ex,Mf,tx,rx,Pf,ax,lx,Gf,nx,ox,Nf,ix
,dx,le,kf,sx,hx,Cf,cx,gx,If,fx,ux,Bf,vx,Ex,xf,Tx,_x,Of,mx,px,ne,Ff,Dx,Ax,Wf,Rx,yx,Jf,bx,Lx,Yf,Sx,wx,Kf,Mx,Px,Vf,Gx,Nx,oe,Zf,kx,Cx,Xf,Ix,Bx,zf,xx,Ox,Hf,Fx,Wx,Uf,Jx,Yx,qf,Kx,Vx,ie,jf,Zx,Xx,Qf,zx,Hx,$f,Ux,qx,eu,jx,Qx,tu,$x,eO,ru,tO,rO,de,au,aO,lO,lu,nO,oO,nu,iO,dO,ou,sO,hO,iu,cO,gO,du,fO,uO,se,su,vO,EO,hu,TO,_O,cu,mO,pO,gu,DO,AO,fu,RO,yO,uu,bO,LO,he,vu,SO,wO,Eu,MO,PO,Tu,GO,NO,_u,kO,CO,mu,IO,BO,pu,xO,OO,ce,Du,FO,WO,Au,JO,YO,Ru,KO,VO,yu,ZO,XO,bu,zO,HO,Lu,UO,qO,ge,Su,jO,QO,wu,$O,eF,Mu,tF,rF,Pu,aF,lF,Gu,nF,oF,Nu,iF,dF,fe,ku,sF,hF,Cu,cF,gF,Iu,fF,uF,Bu,vF,EF,xu,TF,_F,Ou,mF,pF,ue,Fu,DF,AF,Wu,RF,yF,Ju,bF,LF,Yu,SF,wF,Ku,MF,PF,Vu,GF,NF,ve,Zu,kF,CF,Xu,IF,BF,zu,xF,OF,Hu,FF,WF,Uu,JF,YF,qu,KF,VF,Ee,ju,ZF,XF,Qu,zF,HF,$u,UF,qF,ev,jF,QF,tv,$F,eW,rv,tW,rW,Te,av,aW,lW,lv,nW,oW,nv,iW,dW,ov,sW,hW,iv,cW,gW,dv,fW,uW,_e,sv,vW,EW,hv,TW,_W,cv,mW,pW,gv,DW,AW,fv,RW,yW,uv,bW,LW,me,vv,SW,wW,Ev,MW,PW,Tv,GW,NW,_v,kW,CW,mv,IW,BW,pv,xW,OW,pe,Dv,FW,WW,Av,JW,YW,Rv,KW,VW,yv,ZW,XW,bv,zW,HW,Lv,UW,qW,De,Sv,jW,QW,wv,$W,eJ,Mv,tJ,rJ,Pv,aJ,lJ,Gv,nJ,oJ,Nv,iJ,dJ,Ae,kv,sJ,hJ,Cv,cJ,gJ,Iv,fJ,uJ,Bv,vJ,EJ,xv,TJ,_J,Ov,mJ,pJ,Re,Fv,DJ,AJ,Wv,RJ,yJ,Jv,bJ,LJ,Yv,SJ,wJ,Kv,MJ,PJ,Vv,GJ,NJ,ye,Zv,kJ,CJ,Xv,IJ,BJ,zv,xJ,OJ,Hv,FJ,WJ,Uv,JJ,YJ,qv,KJ,VJ,be,jv,ZJ,XJ,Qv,zJ,HJ,$v,UJ,qJ,eE,jJ,QJ,tE,$J,eY,rE,tY,rY,Le,aE,aY,lY,lE,nY,oY,nE,iY,dY,oE,sY,hY,iE,cY,gY,dE,fY,uY,Se,sE,vY,EY,hE,TY,_Y,cE,mY,pY,gE,DY,AY,fE,RY,yY,uE,bY,LY,we,vE,SY,wY,EE,MY,PY,TE,GY,NY,_E,kY,CY,mE,IY,BY,pE,xY,OY,Me,DE,FY,WY,AE,JY,YY,RE,KY,VY,yE,ZY,XY,bE,zY,HY,LE,UY,qY,Pe,SE,jY,QY,wE,$Y,eK,ME,tK,rK,PE,aK,lK,GE,nK,oK,NE,iK,dK,Ge,kE,sK,hK,CE,cK,gK,IE,fK,uK,BE,vK,EK,xE,TK,_K,OE,mK,pK,Ne,FE,DK,AK,WE,RK,yK,JE,bK,LK,YE,SK,wK,KE,MK,PK,VE,GK,NK,ke,ZE,kK,CK,XE,IK,BK,zE,xK,OK,HE,FK,WK,UE,JK,YK,qE,KK,VK,Ce,jE,ZK,XK,QE,zK,HK,$E,UK,qK,e2,jK,QK,t2,$K,eV,r2,tV,rV,Ie,a2,aV,lV,l2,nV,oV,n2,iV,dV,o2,sV,hV,i2,cV,gV,d2,fV,uV,Be,s2,vV,EV,h2,TV,_V,c2,mV,pV,g2,DV,AV,f2,RV,yV,u2,bV,LV,xe,v2,SV,wV,E2,MV,PV,T2,GV,NV,_2,kV,CV,m2,IV,BV,p2,xV,OV,Oe,D2,FV,WV,A2,JV,YV,R2,KV,VV,y2,ZV,XV,b2,zV,HV,L2,UV,qV,F
e,S2,jV,QV,w2,$V,eZ,M2,tZ,rZ,P2,aZ,lZ,G2,nZ,oZ,N2,iZ,dZ,We,k2,sZ,hZ,C2,cZ,gZ,I2,fZ,uZ,B2,vZ,EZ,x2,TZ,_Z,O2,mZ,pZ,Je,F2,DZ,AZ,W2,RZ,yZ,J2,bZ,LZ,Y2,SZ,wZ,K2,MZ,PZ,V2,GZ,NZ,Ye,Z2,kZ,CZ,X2,IZ,BZ,z2,xZ,OZ,H2,FZ,WZ,U2,JZ,YZ,q2,KZ,VZ,Ke,j2,ZZ,XZ,Q2,zZ,HZ,$2,UZ,qZ,eT,jZ,QZ,tT,$Z,eX,rT,tX,rX,Ve,aT,aX,lX,lT,nX,oX,nT,iX,dX,oT,sX,hX,iT,cX,gX,dT,fX,uX,Ze,sT,vX,EX,hT,TX,_X,cT,mX,pX,gT,DX,AX,fT,RX,yX,uT,bX,LX,Xe,vT,SX,wX,ET,MX,PX,TT,GX,NX,_T,kX,CX,mT,IX,BX,pT,xX,OX,ze,DT,FX,WX,AT,JX,YX,RT,KX,VX,yT,ZX,XX,bT,zX,HX,LT,UX,qX,He,ST,jX,QX,wT,$X,ez,MT,tz,rz,PT,az,lz,GT,nz,oz,NT,iz,dz,Ue,kT,sz,hz,CT,cz,gz,IT,fz,uz,BT,vz,Ez,xT,Tz,_z,OT,mz,pz,qe,FT,Dz,Az,WT,Rz,yz,JT,bz,Lz,YT,Sz,wz,KT,Mz,Pz,VT,Gz,Nz,je,ZT,kz,Cz,XT,Iz,Bz,zT,xz,Oz,HT,Fz,Wz,UT,Jz,Yz,qT,Kz,Vz,Qe,jT,Zz,Xz,QT,zz,Hz,$T,Uz,qz,e_,jz,Qz,t_,$z,eH,r_,tH,rH,$e,a_,aH,lH,l_,nH,oH,n_,iH,dH,o_,sH,hH,i_,cH,gH,d_,fH,uH,et,s_,vH,EH,h_,TH,_H,c_,mH,pH,g_,DH,AH,f_,RH,yH,u_,bH,LH,tt,v_,SH,wH,E_,MH,PH,T_,GH,NH,__,kH,CH,m_,IH,BH,p_,xH,OH,rt,D_,FH,WH,A_,JH,YH,R_,KH,VH,y_,ZH,XH,b_,zH,HH,L_,UH,qH,at,S_,jH,QH,w_,$H,eU,M_,tU,rU,P_,aU,lU,G_,nU,oU,N_,iU,dU,lt,k_,sU,hU,C_,cU,gU,I_,fU,uU,B_,vU,EU,x_,TU,_U,O_,mU,pU,nt,F_,DU,AU,W_,RU,yU,J_,bU,LU,Y_,SU,wU,K_,MU,PU,V_,GU,NU,ot,Z_,kU,CU,X_,IU,BU,z_,xU,OU,H_,FU,WU,U_,JU,YU,q_,KU,VU,it,j_,ZU,XU,Q_,zU,HU,$_,UU,qU,e3,jU,QU,t3,$U,eq,r3,tq,rq,dt,a3,aq,lq,l3,nq,oq,n3,iq,dq,o3,sq,hq,i3,cq,gq,d3,fq,uq,st,s3,vq,Eq,h3,Tq,_q,c3,mq,pq,g3,Dq,Aq,f3,Rq,yq,u3,bq,Lq,ht,v3,Sq,wq,E3,Mq,Pq,T3,Gq,Nq,_3,kq,Cq,m3,Iq,Bq,p3,xq,Oq,ct,D3,Fq,Wq,A3,Jq,Yq,R3,Kq,Vq,y3,Zq,Xq,b3,zq,Hq,L3,Uq,qq,gt,S3,jq,Qq,w3,$q,ej,M3,tj,rj,P3,aj,lj,G3,nj,oj,N3,ij,dj,ft,k3,sj,hj,C3,cj,gj,I3,fj,uj,B3,vj,Ej,x3,Tj,_j,O3,mj,Kp;return Nn=new bp({}),Bn=new bp({}),xn=new bp({}),On=new bp({}),Xi=new bp({}),{c(){Da=r("meta"),Lp=d(),Aa=r("h1"),Ma=r("a"),F3=r("span"),mp(Nn.$$.fragment),jp=d(),W3=r("span"),Qp=n("\u{1F917} Transformers"),Sp=d(),$i=r("p"),$p=n(`Estado da Arte para Aprendizado de M\xE1quina em PyTorch, TensorFlow e JAX. 
O \u{1F917} Transformers disponibiliza APIs para facilmente baixar e treinar modelos pr\xE9-treinados de \xFAltima gera\xE7\xE3o. O uso de modelos pr\xE9-treinados pode diminuir os seus custos de computa\xE7\xE3o, a sua pegada de carbono, al\xE9m de economizar o tempo necess\xE1rio para se treinar um modelo do zero. Os modelos podem ser usados para diversas tarefas:`),wp=d(),vt=r("ul"),J3=r("li"),e6=n("\u{1F4DD} Textos: classifica\xE7\xE3o, extra\xE7\xE3o de informa\xE7\xF5es, perguntas e respostas, resumir, traduzir e gerar textos em mais de 100 idiomas."),t6=d(),Y3=r("li"),r6=n("\u{1F5BC} Imagens: classifica\xE7\xE3o, dete\xE7\xE3o de objetos, e segmenta\xE7\xE3o."),a6=d(),K3=r("li"),l6=n("\u{1F5E3} Audio: reconhecimento de fala e classifica\xE7\xE3o de \xE1udio."),n6=d(),V3=r("li"),o6=n(`\u{1F419} Multimodal: perguntas tabeladas e respsostas, reconhecimento \xF3tico de charact\xE9res, extra\xE7\xE3o de informa\xE7\xE3o de documentos escaneados, classifica\xE7\xE3o de v\xEDdeo, perguntas e respostas visuais.`),Mp=d(),Et=r("p"),i6=n(`Nossa biblioteca aceita integra\xE7\xE3o cont\xEDnua entre tr\xEAs das bibliotecas mais populares de aprendizado profundo: Our library supports seamless integration between three of the most popular deep learning libraries: `),kn=r("a"),d6=n("PyTorch"),s6=n(", "),Cn=r("a"),h6=n("TensorFlow"),c6=n(" e "),In=r("a"),g6=n("JAX"),f6=n(`. 
Treine seu modelo em tr\xEAs linhas de c\xF3digo em um framework, e carregue-o para execu\xE7\xE3o em outro.`),Pp=d(),ed=r("p"),u6=n("Cada arquitetura \u{1F917} Transformers \xE9 definida em um m\xF3dulo individual do Python, para que seja facilmente customiz\xE1vel para pesquisa e experimentos."),Gp=d(),Ra=r("h2"),Pa=r("a"),Z3=r("span"),mp(Bn.$$.fragment),v6=d(),X3=r("span"),E6=n("Se voc\xEA estiver procurando suporte do time da Hugging Face, acesse"),Np=d(),ya=r("a"),ua=r("img"),T6=n(`&lt;/img> `),kp=r("br"),Cp=d(),ba=r("h2"),Ga=r("a"),z3=r("span"),mp(xn.$$.fragment),_6=d(),H3=r("span"),m6=n("Conte\xFAdo"),Ip=d(),td=r("p"),p6=n("A documenta\xE7\xE3o \xE9 dividida em cinco partes:"),Bp=d(),T=r("ul"),U3=r("li"),rd=r("p"),q3=r("strong"),D6=n("IN\xCDCIO"),A6=n(" cont\xE9m um tour r\xE1pido de instala\xE7\xE3o e instru\xE7\xF5es para te dar um empurr\xE3o inicial com os \u{1F917} Transformers."),R6=d(),j3=r("li"),ad=r("p"),Q3=r("strong"),y6=n("TUTORIAIS"),b6=n(` s\xE3o perfeitos para come\xE7ar a aprender sobre a nossa biblioteca. 
Essa se\xE7\xE3o ir\xE1 te ajudar a desenvolver habilidades b\xE1sicas necess\xE1rias para usar o \u{1F917} Transformers.`),L6=d(),$3=r("li"),ld=r("p"),e5=r("strong"),S6=n("GUIAS PR\xC1TICOS"),w6=n(` ir\xE3o te mostrar como alcan\xE7ar um certo objetivo, como o fine-tuning de um modelo pr\xE9-treinado para modelamento de idioma, ou como criar um cabe\xE7alho personalizado para um modelo.`),M6=d(),t5=r("li"),nd=r("p"),r5=r("strong"),P6=n("GUIAS CONCEITUAIS"),G6=n(` te dar\xE3o mais discuss\xF5es e explica\xE7\xF5es dos conceitos fundamentais e id\xE9ias por tr\xE1s dos modelos, tarefas e da filosofia de design por tr\xE1s do \u{1F917} Transformers.`),N6=d(),a5=r("li"),od=r("p"),l5=r("strong"),k6=n("API"),C6=n(" descreve o funcionamento de cada classe e fun\xE7\xE3o, agrupada em:"),I6=d(),n5=r("li"),id=r("p"),o5=r("strong"),B6=n("CLASSES PRINCIPAIS"),x6=n(" para as classes que exp\xF5e as APIs importantes da biblioteca."),O6=d(),i5=r("li"),dd=r("p"),d5=r("strong"),F6=n("MODELOS"),W6=n(" para as classes e fun\xE7\xF5es relacionadas \xE0 cada modelo implementado na biblioteca."),J6=d(),s5=r("li"),sd=r("p"),h5=r("strong"),Y6=n("AUXILIARES INTERNOS"),K6=n(" para as classes e fun\xE7\xF5es usadas internamente."),xp=d(),hd=r("p"),V6=n("Atualmente a biblioteca cont\xE9m implementa\xE7\xF5es do PyTorch, TensorFlow e JAX, pesos para modelos pr\xE9-treinados e scripts de uso e convers\xE3o de utilidades para os seguintes modelos:"),Op=d(),La=r("h3"),Na=r("a"),c5=r("span"),mp(On.$$.fragment),Z6=d(),g5=r("span"),X6=n("Modelos atuais"),Fp=d(),h=r("ol"),ka=r("li"),f5=r("strong"),cd=r("a"),z6=n("ALBERT"),H6=n(" (from Google Research and the Toyota Technological Institute at Chicago) released with the paper "),Fn=r("a"),U6=n("ALBERT: A Lite BERT for Self-supervised Learning of Language Representations"),q6=n(", by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."),j6=d(),Ca=r("li"),u5=r("strong"),gd=r("a"),Q6=n("BART"),$6=n(" (from Facebook) 
released with the paper "),Wn=r("a"),e7=n("BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension"),t7=n(" by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer."),r7=d(),Ia=r("li"),v5=r("strong"),fd=r("a"),a7=n("BARThez"),l7=n(" (from \xC9cole polytechnique) released with the paper "),Jn=r("a"),n7=n("BARThez: a Skilled Pretrained French Sequence-to-Sequence Model"),o7=n(" by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis."),i7=d(),Ba=r("li"),E5=r("strong"),ud=r("a"),d7=n("BARTpho"),s7=n(" (from VinAI Research) released with the paper "),Yn=r("a"),h7=n("BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese"),c7=n(" by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen."),g7=d(),xa=r("li"),T5=r("strong"),vd=r("a"),f7=n("BEiT"),u7=n(" (from Microsoft) released with the paper "),Kn=r("a"),v7=n("BEiT: BERT Pre-Training of Image Transformers"),E7=n(" by Hangbo Bao, Li Dong, Furu Wei."),T7=d(),Oa=r("li"),_5=r("strong"),Ed=r("a"),_7=n("BERT"),m7=n(" (from Google) released with the paper "),Vn=r("a"),p7=n("BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"),D7=n(" by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova."),A7=d(),Fa=r("li"),m5=r("strong"),Td=r("a"),R7=n("BERTweet"),y7=n(" (from VinAI Research) released with the paper "),Zn=r("a"),b7=n("BERTweet: A pre-trained language model for English Tweets"),L7=n(" by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen."),S7=d(),Wa=r("li"),p5=r("strong"),_d=r("a"),w7=n("BERT For Sequence Generation"),M7=n(" (from Google) released with the paper "),Xn=r("a"),P7=n("Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),G7=n(" by Sascha Rothe, Shashi Narayan, Aliaksei Severyn."),N7=d(),Ja=r("li"),D5=r("strong"),md=r("a"),k7=n("BigBird-RoBERTa"),C7=n(" (from Google Research) released with the paper 
"),zn=r("a"),I7=n("Big Bird: Transformers for Longer Sequences"),B7=n(" by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed."),x7=d(),Ya=r("li"),A5=r("strong"),pd=r("a"),O7=n("BigBird-Pegasus"),F7=n(" (from Google Research) released with the paper "),Hn=r("a"),W7=n("Big Bird: Transformers for Longer Sequences"),J7=n(" by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed."),Y7=d(),Ka=r("li"),R5=r("strong"),Dd=r("a"),K7=n("Blenderbot"),V7=n(" (from Facebook) released with the paper "),Un=r("a"),Z7=n("Recipes for building an open-domain chatbot"),X7=n(" by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston."),z7=d(),Va=r("li"),y5=r("strong"),Ad=r("a"),H7=n("BlenderbotSmall"),U7=n(" (from Facebook) released with the paper "),qn=r("a"),q7=n("Recipes for building an open-domain chatbot"),j7=n(" by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston."),Q7=d(),Za=r("li"),b5=r("strong"),Rd=r("a"),$7=n("BORT"),e8=n(" (from Alexa) released with the paper "),jn=r("a"),t8=n("Optimal Subarchitecture Extraction For BERT"),r8=n(" by Adrian de Wynter and Daniel J. 
Perry."),a8=d(),Xa=r("li"),L5=r("strong"),yd=r("a"),l8=n("ByT5"),n8=n(" (from Google Research) released with the paper "),Qn=r("a"),o8=n("ByT5: Towards a token-free future with pre-trained byte-to-byte models"),i8=n(" by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel."),d8=d(),va=r("li"),S5=r("strong"),bd=r("a"),s8=n("CamemBERT"),h8=n(" (from Inria/Facebook/Sorbonne) released with the paper "),$n=r("a"),c8=n("CamemBERT: a Tasty French Language Model"),g8=n(" by Louis Martin"),w5=r("em"),f8=n(", Benjamin Muller"),u8=n(", Pedro Javier Ortiz Su\xE1rez*, Yoann Dupont, Laurent Romary, \xC9ric Villemonte de la Clergerie, Djam\xE9 Seddah and Beno\xEEt Sagot."),v8=d(),za=r("li"),M5=r("strong"),Ld=r("a"),E8=n("CANINE"),T8=n(" (from Google Research) released with the paper "),eo=r("a"),_8=n("CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation"),m8=n(" by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting."),p8=d(),Ha=r("li"),P5=r("strong"),Sd=r("a"),D8=n("ConvNeXT"),A8=n(" (from Facebook AI) released with the paper "),to=r("a"),R8=n("A ConvNet for the 2020s"),y8=n(" by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie."),b8=d(),Ua=r("li"),G5=r("strong"),wd=r("a"),L8=n("CLIP"),S8=n(" (from OpenAI) released with the paper "),ro=r("a"),w8=n("Learning Transferable Visual Models From Natural Language Supervision"),M8=n(" by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever."),P8=d(),qa=r("li"),N5=r("strong"),Md=r("a"),G8=n("ConvBERT"),N8=n(" (from YituTech) released with the paper "),ao=r("a"),k8=n("ConvBERT: Improving BERT with Span-based Dynamic Convolution"),C8=n(" by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng 
Yan."),I8=d(),ja=r("li"),k5=r("strong"),Pd=r("a"),B8=n("CPM"),x8=n(" (from Tsinghua University) released with the paper "),lo=r("a"),O8=n("CPM: A Large-scale Generative Chinese Pre-trained Language Model"),F8=n(" by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun."),W8=d(),Ea=r("li"),C5=r("strong"),Gd=r("a"),J8=n("CTRL"),Y8=n(" (from Salesforce) released with the paper "),no=r("a"),K8=n("CTRL: A Conditional Transformer Language Model for Controllable Generation"),V8=n(" by Nitish Shirish Keskar"),I5=r("em"),Z8=n(", Bryan McCann"),X8=n(", Lav R. Varshney, Caiming Xiong and Richard Socher."),z8=d(),Qa=r("li"),B5=r("strong"),Nd=r("a"),H8=n("Data2Vec"),U8=n(" (from Facebook) released with the paper "),oo=r("a"),q8=n("Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language"),j8=n(" by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli."),Q8=d(),$a=r("li"),x5=r("strong"),kd=r("a"),$8=n("DeBERTa"),e9=n(" (from Microsoft) released with the paper "),io=r("a"),t9=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),r9=n(" by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen."),a9=d(),el=r("li"),O5=r("strong"),Cd=r("a"),l9=n("DeBERTa-v2"),n9=n(" (from Microsoft) released with the paper "),so=r("a"),o9=n("DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),i9=n(" by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen."),d9=d(),tl=r("li"),F5=r("strong"),Id=r("a"),s9=n("Decision Transformer"),h9=n(" (from Berkeley/Facebook/Google) released with the paper "),ho=r("a"),c9=n("Decision Transformer: Reinforcement Learning via Sequence Modeling"),g9=n(" by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, 
Aravind Srinivas, Igor Mordatch."),f9=d(),rl=r("li"),W5=r("strong"),Bd=r("a"),u9=n("DiT"),v9=n(" (from Microsoft Research) released with the paper "),co=r("a"),E9=n("DiT: Self-supervised Pre-training for Document Image Transformer"),T9=n(" by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei."),_9=d(),al=r("li"),J5=r("strong"),xd=r("a"),m9=n("DeiT"),p9=n(" (from Facebook) released with the paper "),go=r("a"),D9=n("Training data-efficient image transformers & distillation through attention"),A9=n(" by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Herv\xE9 J\xE9gou."),R9=d(),ll=r("li"),Y5=r("strong"),Od=r("a"),y9=n("DETR"),b9=n(" (from Facebook) released with the paper "),fo=r("a"),L9=n("End-to-End Object Detection with Transformers"),S9=n(" by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko."),w9=d(),nl=r("li"),K5=r("strong"),Fd=r("a"),M9=n("DialoGPT"),P9=n(" (from Microsoft Research) released with the paper "),uo=r("a"),G9=n("DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation"),N9=n(" by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan."),k9=d(),ut=r("li"),V5=r("strong"),Wd=r("a"),C9=n("DistilBERT"),I9=n(" (from HuggingFace), released together with the paper "),vo=r("a"),B9=n("DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter"),x9=n(" by Victor Sanh, Lysandre Debut and Thomas Wolf. 
The same method has been applied to compress GPT2 into "),Eo=r("a"),O9=n("DistilGPT2"),F9=n(", RoBERTa into "),To=r("a"),W9=n("DistilRoBERTa"),J9=n(", Multilingual BERT into "),_o=r("a"),Y9=n("DistilmBERT"),K9=n(" and a German version of DistilBERT."),V9=d(),ol=r("li"),Z5=r("strong"),Jd=r("a"),Z9=n("DPR"),X9=n(" (from Facebook) released with the paper "),mo=r("a"),z9=n("Dense Passage Retrieval for Open-Domain Question Answering"),H9=n(" by Vladimir Karpukhin, Barlas O\u011Fuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih."),U9=d(),il=r("li"),X5=r("strong"),Yd=r("a"),q9=n("DPT"),j9=n(" (from Intel Labs) released with the paper "),po=r("a"),Q9=n("Vision Transformers for Dense Prediction"),$9=n(" by Ren\xE9 Ranftl, Alexey Bochkovskiy, Vladlen Koltun."),eD=d(),dl=r("li"),z5=r("strong"),Kd=r("a"),tD=n("EncoderDecoder"),rD=n(" (from Google Research) released with the paper "),Do=r("a"),aD=n("Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),lD=n(" by Sascha Rothe, Shashi Narayan, Aliaksei Severyn."),nD=d(),sl=r("li"),H5=r("strong"),Vd=r("a"),oD=n("ELECTRA"),iD=n(" (from Google Research/Stanford University) released with the paper "),Ao=r("a"),dD=n("ELECTRA: Pre-training text encoders as discriminators rather than generators"),sD=n(" by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. 
Manning."),hD=d(),hl=r("li"),U5=r("strong"),Zd=r("a"),cD=n("FlauBERT"),gD=n(" (from CNRS) released with the paper "),Ro=r("a"),fD=n("FlauBERT: Unsupervised Language Model Pre-training for French"),uD=n(" by Hang Le, Lo\xEFc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Beno\xEEt Crabb\xE9, Laurent Besacier, Didier Schwab."),vD=d(),cl=r("li"),q5=r("strong"),Xd=r("a"),ED=n("FNet"),TD=n(" (from Google Research) released with the paper "),yo=r("a"),_D=n("FNet: Mixing Tokens with Fourier Transforms"),mD=n(" by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon."),pD=d(),gl=r("li"),j5=r("strong"),zd=r("a"),DD=n("Funnel Transformer"),AD=n(" (from CMU/Google Brain) released with the paper "),bo=r("a"),RD=n("Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing"),yD=n(" by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le."),bD=d(),fl=r("li"),Q5=r("strong"),Hd=r("a"),LD=n("GLPN"),SD=n(" (from KAIST) released with the paper "),Lo=r("a"),wD=n("Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth"),MD=n(" by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim."),PD=d(),ul=r("li"),$5=r("strong"),Ud=r("a"),GD=n("GPT"),ND=n(" (from OpenAI) released with the paper "),So=r("a"),kD=n("Improving Language Understanding by Generative Pre-Training"),CD=n(" by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever."),ID=d(),Tt=r("li"),e4=r("strong"),qd=r("a"),BD=n("GPT-2"),xD=n(" (from OpenAI) released with the paper "),wo=r("a"),OD=n("Language Models are Unsupervised Multitask Learners"),FD=n(" by Alec Radford"),t4=r("em"),WD=n(", Jeffrey Wu"),JD=n(", Rewon Child, David Luan, Dario Amodei"),r4=r("strong"),YD=n("and Ilya Sutskever"),KD=n("."),VD=d(),vl=r("li"),a4=r("strong"),jd=r("a"),ZD=n("GPT-J"),XD=n(" (from EleutherAI) released in the repository "),Mo=r("a"),zD=n("kingoflolz/mesh-transformer-jax"),HD=n(" by Ben Wang and Aran 
Komatsuzaki."),UD=d(),El=r("li"),l4=r("strong"),Qd=r("a"),qD=n("GPT Neo"),jD=n(" (from EleutherAI) released in the repository "),Po=r("a"),QD=n("EleutherAI/gpt-neo"),$D=n(" by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy."),eA=d(),Tl=r("li"),n4=r("strong"),$d=r("a"),tA=n("Hubert"),rA=n(" (from Facebook) released with the paper "),Go=r("a"),aA=n("HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),lA=n(" by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed."),nA=d(),_l=r("li"),o4=r("strong"),es=r("a"),oA=n("I-BERT"),iA=n(" (from Berkeley) released with the paper "),No=r("a"),dA=n("I-BERT: Integer-only BERT Quantization"),sA=n(" by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer."),hA=d(),ml=r("li"),i4=r("strong"),ts=r("a"),cA=n("ImageGPT"),gA=n(" (from OpenAI) released with the paper "),ko=r("a"),fA=n("Generative Pretraining from Pixels"),uA=n(" by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever."),vA=d(),pl=r("li"),d4=r("strong"),rs=r("a"),EA=n("LayoutLM"),TA=n(" (from Microsoft Research Asia) released with the paper "),Co=r("a"),_A=n("LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),mA=n(" by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou."),pA=d(),Dl=r("li"),s4=r("strong"),as=r("a"),DA=n("LayoutLMv2"),AA=n(" (from Microsoft Research Asia) released with the paper "),Io=r("a"),RA=n("LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding"),yA=n(" by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou."),bA=d(),Al=r("li"),h4=r("strong"),ls=r("a"),LA=n("LayoutXLM"),SA=n(" (from Microsoft Research Asia) released with the paper "),Bo=r("a"),wA=n("LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document 
Understanding"),MA=n(" by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei."),PA=d(),Rl=r("li"),c4=r("strong"),ns=r("a"),GA=n("LED"),NA=n(" (from AllenAI) released with the paper "),xo=r("a"),kA=n("Longformer: The Long-Document Transformer"),CA=n(" by Iz Beltagy, Matthew E. Peters, Arman Cohan."),IA=d(),yl=r("li"),g4=r("strong"),os=r("a"),BA=n("Longformer"),xA=n(" (from AllenAI) released with the paper "),Oo=r("a"),OA=n("Longformer: The Long-Document Transformer"),FA=n(" by Iz Beltagy, Matthew E. Peters, Arman Cohan."),WA=d(),bl=r("li"),f4=r("strong"),is=r("a"),JA=n("LUKE"),YA=n(" (from Studio Ousia) released with the paper "),Fo=r("a"),KA=n("LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention"),VA=n(" by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto."),ZA=d(),Ll=r("li"),u4=r("strong"),ds=r("a"),XA=n("mLUKE"),zA=n(" (from Studio Ousia) released with the paper "),Wo=r("a"),HA=n("mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models"),UA=n(" by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka."),qA=d(),Sl=r("li"),v4=r("strong"),ss=r("a"),jA=n("LXMERT"),QA=n(" (from UNC Chapel Hill) released with the paper "),Jo=r("a"),$A=n("LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering"),eR=n(" by Hao Tan and Mohit Bansal."),tR=d(),wl=r("li"),E4=r("strong"),hs=r("a"),rR=n("M2M100"),aR=n(" (from Facebook) released with the paper "),Yo=r("a"),lR=n("Beyond English-Centric Multilingual Machine Translation"),nR=n(" by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin."),oR=d(),Ta=r("li"),T4=r("strong"),cs=r("a"),iR=n("MarianMT"),dR=n(" Machine translation models trained using 
"),Ko=r("a"),sR=n("OPUS"),hR=n(" data by J\xF6rg Tiedemann. The "),Vo=r("a"),cR=n("Marian Framework"),gR=n(" is being developed by the Microsoft Translator Team."),fR=d(),Ml=r("li"),_4=r("strong"),gs=r("a"),uR=n("MaskFormer"),vR=n(" (from Meta and UIUC) released with the paper "),Zo=r("a"),ER=n("Per-Pixel Classification is Not All You Need for Semantic Segmentation"),TR=n(" by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov."),_R=d(),Pl=r("li"),m4=r("strong"),fs=r("a"),mR=n("MBart"),pR=n(" (from Facebook) released with the paper "),Xo=r("a"),DR=n("Multilingual Denoising Pre-training for Neural Machine Translation"),AR=n(" by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer."),RR=d(),Gl=r("li"),p4=r("strong"),us=r("a"),yR=n("MBart-50"),bR=n(" (from Facebook) released with the paper "),zo=r("a"),LR=n("Multilingual Translation with Extensible Multilingual Pretraining and Finetuning"),SR=n(" by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan."),wR=d(),Nl=r("li"),D4=r("strong"),vs=r("a"),MR=n("Megatron-BERT"),PR=n(" (from NVIDIA) released with the paper "),Ho=r("a"),GR=n("Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism"),NR=n(" by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro."),kR=d(),kl=r("li"),A4=r("strong"),Es=r("a"),CR=n("Megatron-GPT2"),IR=n(" (from NVIDIA) released with the paper "),Uo=r("a"),BR=n("Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism"),xR=n(" by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro."),OR=d(),Cl=r("li"),R4=r("strong"),Ts=r("a"),FR=n("MPNet"),WR=n(" (from Microsoft Research) released with the paper "),qo=r("a"),JR=n("MPNet: Masked and Permuted Pre-training for Language Understanding"),YR=n(" by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan 
Liu."),KR=d(),Il=r("li"),y4=r("strong"),_s=r("a"),VR=n("MT5"),ZR=n(" (from Google AI) released with the paper "),jo=r("a"),XR=n("mT5: A massively multilingual pre-trained text-to-text transformer"),zR=n(" by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel."),HR=d(),Bl=r("li"),b4=r("strong"),ms=r("a"),UR=n("Nystr\xF6mformer"),qR=n(" (from the University of Wisconsin - Madison) released with the paper "),Qo=r("a"),jR=n("Nystr\xF6mformer: A Nystr\xF6m-Based Algorithm for Approximating Self-Attention"),QR=n(" by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh."),$R=d(),xl=r("li"),L4=r("strong"),ps=r("a"),ey=n("Pegasus"),ty=n(" (from Google) released with the paper "),$o=r("a"),ry=n("PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization"),ay=n(" by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu."),ly=d(),Ol=r("li"),S4=r("strong"),Ds=r("a"),ny=n("Perceiver IO"),oy=n(" (from Deepmind) released with the paper "),ei=r("a"),iy=n("Perceiver IO: A General Architecture for Structured Inputs & Outputs"),dy=n(" by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier H\xE9naff, Matthew M. 
Botvinick, Andrew Zisserman, Oriol Vinyals, Jo\xE3o Carreira."),sy=d(),Fl=r("li"),w4=r("strong"),As=r("a"),hy=n("PhoBERT"),cy=n(" (from VinAI Research) released with the paper "),ti=r("a"),gy=n("PhoBERT: Pre-trained language models for Vietnamese"),fy=n(" by Dat Quoc Nguyen and Anh Tuan Nguyen."),uy=d(),Wl=r("li"),M4=r("strong"),Rs=r("a"),vy=n("PLBart"),Ey=n(" (from UCLA NLP) released with the paper "),ri=r("a"),Ty=n("Unified Pre-training for Program Understanding and Generation"),_y=n(" by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang."),my=d(),Jl=r("li"),P4=r("strong"),ys=r("a"),py=n("PoolFormer"),Dy=n(" (from Sea AI Labs) released with the paper "),ai=r("a"),Ay=n("MetaFormer is Actually What You Need for Vision"),Ry=n(" by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng."),yy=d(),Yl=r("li"),G4=r("strong"),bs=r("a"),by=n("ProphetNet"),Ly=n(" (from Microsoft Research) released with the paper "),li=r("a"),Sy=n("ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training"),wy=n(" by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou."),My=d(),Kl=r("li"),N4=r("strong"),Ls=r("a"),Py=n("QDQBert"),Gy=n(" (from NVIDIA) released with the paper "),ni=r("a"),Ny=n("Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation"),ky=n(" by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius."),Cy=d(),Vl=r("li"),k4=r("strong"),Ss=r("a"),Iy=n("REALM"),By=n(" (from Google Research) released with the paper "),oi=r("a"),xy=n("REALM: Retrieval-Augmented Language Model Pre-Training"),Oy=n(" by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang."),Fy=d(),Zl=r("li"),C4=r("strong"),ws=r("a"),Wy=n("Reformer"),Jy=n(" (from Google Research) released with the paper "),ii=r("a"),Yy=n("Reformer: The Efficient Transformer"),Ky=n(" by Nikita Kitaev, \u0141ukasz Kaiser, 
Anselm Levskaya."),Vy=d(),Xl=r("li"),I4=r("strong"),Ms=r("a"),Zy=n("RemBERT"),Xy=n(" (from Google Research) released with the paper "),di=r("a"),zy=n("Rethinking embedding coupling in pre-trained language models"),Hy=n(" by Hyung Won Chung, Thibault F\xE9vry, Henry Tsai, M. Johnson, Sebastian Ruder."),Uy=d(),zl=r("li"),B4=r("strong"),Ps=r("a"),qy=n("RegNet"),jy=n(" (from META Platforms) released with the paper "),si=r("a"),Qy=n("Designing Network Design Space"),$y=n(" by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Doll\xE1r."),eb=d(),Hl=r("li"),x4=r("strong"),Gs=r("a"),tb=n("ResNet"),rb=n(" (from Microsoft Research) released with the paper "),hi=r("a"),ab=n("Deep Residual Learning for Image Recognition"),lb=n(" by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun."),nb=d(),Ul=r("li"),O4=r("strong"),Ns=r("a"),ob=n("RoBERTa"),ib=n(" (from Facebook), released together with the paper "),ci=r("a"),db=n("RoBERTa: A Robustly Optimized BERT Pretraining Approach"),sb=n(" by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov."),hb=d(),ql=r("li"),F4=r("strong"),ks=r("a"),cb=n("RoFormer"),gb=n(" (from ZhuiyiTechnology), released together with the paper "),gi=r("a"),fb=n("RoFormer: Enhanced Transformer with Rotary Position Embedding"),ub=n(" by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu."),vb=d(),jl=r("li"),W4=r("strong"),Cs=r("a"),Eb=n("SegFormer"),Tb=n(" (from NVIDIA) released with the paper "),fi=r("a"),_b=n("SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers"),mb=n(" by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. 
Alvarez, Ping Luo."),pb=d(),Ql=r("li"),J4=r("strong"),Is=r("a"),Db=n("SEW"),Ab=n(" (from ASAPP) released with the paper "),ui=r("a"),Rb=n("Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),yb=n(" by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi."),bb=d(),$l=r("li"),Y4=r("strong"),Bs=r("a"),Lb=n("SEW-D"),Sb=n(" (from ASAPP) released with the paper "),vi=r("a"),wb=n("Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),Mb=n(" by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi."),Pb=d(),en=r("li"),K4=r("strong"),xs=r("a"),Gb=n("SpeechToTextTransformer"),Nb=n(" (from Facebook), released together with the paper "),Ei=r("a"),kb=n("fairseq S2T: Fast Speech-to-Text Modeling with fairseq"),Cb=n(" by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino."),Ib=d(),tn=r("li"),V4=r("strong"),Os=r("a"),Bb=n("SpeechToTextTransformer2"),xb=n(" (from Facebook), released together with the paper "),Ti=r("a"),Ob=n("Large-Scale Self- and Semi-Supervised Learning for Speech Translation"),Fb=n(" by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau."),Wb=d(),rn=r("li"),Z4=r("strong"),Fs=r("a"),Jb=n("Splinter"),Yb=n(" (from Tel Aviv University), released together with the paper "),_i=r("a"),Kb=n("Few-Shot Question Answering by Pretraining Span Selection"),Vb=n(" by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy."),Zb=d(),an=r("li"),X4=r("strong"),Ws=r("a"),Xb=n("SqueezeBert"),zb=n(" (from Berkeley) released with the paper "),mi=r("a"),Hb=n("SqueezeBERT: What can computer vision teach NLP about efficient neural networks?"),Ub=n(" by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. 
Keutzer."),qb=d(),ln=r("li"),z4=r("strong"),Js=r("a"),jb=n("Swin Transformer"),Qb=n(" (from Microsoft) released with the paper "),pi=r("a"),$b=n("Swin Transformer: Hierarchical Vision Transformer using Shifted Windows"),eL=n(" by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo."),tL=d(),nn=r("li"),H4=r("strong"),Ys=r("a"),rL=n("T5"),aL=n(" (from Google AI) released with the paper "),Di=r("a"),lL=n("Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),nL=n(" by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu."),oL=d(),on=r("li"),U4=r("strong"),Ks=r("a"),iL=n("T5v1.1"),dL=n(" (from Google AI) released in the repository "),Ai=r("a"),sL=n("google-research/text-to-text-transfer-transformer"),hL=n(" by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu."),cL=d(),dn=r("li"),q4=r("strong"),Vs=r("a"),gL=n("TAPAS"),fL=n(" (from Google AI) released with the paper "),Ri=r("a"),uL=n("TAPAS: Weakly Supervised Table Parsing via Pre-training"),vL=n(" by Jonathan Herzig, Pawe\u0142 Krzysztof Nowak, Thomas M\xFCller, Francesco Piccinno and Julian Martin Eisenschlos."),EL=d(),sn=r("li"),j4=r("strong"),Zs=r("a"),TL=n("TAPEX"),_L=n(" (from Microsoft Research) released with the paper "),yi=r("a"),mL=n("TAPEX: Table Pre-training via Learning a Neural SQL Executor"),pL=n(" by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou."),DL=d(),_a=r("li"),Q4=r("strong"),Xs=r("a"),AL=n("Transformer-XL"),RL=n(" (from Google/CMU) released with the paper "),bi=r("a"),yL=n("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"),bL=n(" by Zihang Dai"),$4=r("em"),LL=n(", Zhilin Yang"),SL=n(", Yiming Yang, Jaime Carbonell, Quoc V. 
Le, Ruslan Salakhutdinov."),wL=d(),hn=r("li"),em=r("strong"),zs=r("a"),ML=n("TrOCR"),PL=n(" (from Microsoft), released together with the paper "),Li=r("a"),GL=n("TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),NL=n(" by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei."),kL=d(),cn=r("li"),tm=r("strong"),Hs=r("a"),CL=n("UniSpeech"),IL=n(" (from Microsoft Research) released with the paper "),Si=r("a"),BL=n("UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),xL=n(" by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang."),OL=d(),gn=r("li"),rm=r("strong"),Us=r("a"),FL=n("UniSpeechSat"),WL=n(" (from Microsoft Research) released with the paper "),wi=r("a"),JL=n("UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING"),YL=n(" by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu."),KL=d(),fn=r("li"),am=r("strong"),qs=r("a"),VL=n("VAN"),ZL=n(" (from Tsinghua University and Nankai University) released with the paper "),Mi=r("a"),XL=n("Visual Attention Network"),zL=n(" by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu."),HL=d(),un=r("li"),lm=r("strong"),js=r("a"),UL=n("ViLT"),qL=n(" (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper "),Pi=r("a"),jL=n("ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision"),QL=n(" by Wonjae Kim, Bokyung Son, Ildoo Kim."),$L=d(),vn=r("li"),nm=r("strong"),Qs=r("a"),eS=n("Vision Transformer (ViT)"),tS=n(" (from Google AI) released with the paper "),Gi=r("a"),rS=n("An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale"),aS=n(" by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain 
Gelly, Jakob Uszkoreit, Neil Houlsby."),lS=d(),En=r("li"),om=r("strong"),$s=r("a"),nS=n("ViTMAE"),oS=n(" (from Meta AI) released with the paper "),Ni=r("a"),iS=n("Masked Autoencoders Are Scalable Vision Learners"),dS=n(" by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Doll\xE1r, Ross Girshick."),sS=d(),Tn=r("li"),im=r("strong"),eh=r("a"),hS=n("VisualBERT"),cS=n(" (from UCLA NLP) released with the paper "),ki=r("a"),gS=n("VisualBERT: A Simple and Performant Baseline for Vision and Language"),fS=n(" by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang."),uS=d(),_n=r("li"),dm=r("strong"),th=r("a"),vS=n("WavLM"),ES=n(" (from Microsoft Research) released with the paper "),Ci=r("a"),TS=n("WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing"),_S=n(" by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei."),mS=d(),mn=r("li"),sm=r("strong"),rh=r("a"),pS=n("Wav2Vec2"),DS=n(" (from Facebook AI) released with the paper "),Ii=r("a"),AS=n("wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),RS=n(" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),yS=d(),pn=r("li"),hm=r("strong"),ah=r("a"),bS=n("Wav2Vec2Phoneme"),LS=n(" (from Facebook AI) released with the paper "),Bi=r("a"),SS=n("Simple and Effective Zero-shot Cross-lingual Phoneme Recognition"),wS=n(" by Qiantong Xu, Alexei Baevski, Michael Auli."),MS=d(),Dn=r("li"),cm=r("strong"),lh=r("a"),PS=n("XGLM"),GS=n(" (From Facebook AI) released with the paper "),xi=r("a"),NS=n("Few-shot Learning with Multilingual Language Models"),kS=n(" by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O\u2019Horo, 
Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li."),CS=d(),An=r("li"),gm=r("strong"),nh=r("a"),IS=n("XLM"),BS=n(" (from Facebook) released together with the paper "),Oi=r("a"),xS=n("Cross-lingual Language Model Pretraining"),OS=n(" by Guillaume Lample and Alexis Conneau."),FS=d(),Rn=r("li"),fm=r("strong"),oh=r("a"),WS=n("XLM-ProphetNet"),JS=n(" (from Microsoft Research) released with the paper "),Fi=r("a"),YS=n("ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training"),KS=n(" by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou."),VS=d(),ma=r("li"),um=r("strong"),ih=r("a"),ZS=n("XLM-RoBERTa"),XS=n(" (from Facebook AI), released together with the paper "),Wi=r("a"),zS=n("Unsupervised Cross-lingual Representation Learning at Scale"),HS=n(" by Alexis Conneau"),vm=r("em"),US=n(", Kartikay Khandelwal"),qS=n(", Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\xE1n, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov."),jS=d(),yn=r("li"),Em=r("strong"),dh=r("a"),QS=n("XLM-RoBERTa-XL"),$S=n(" (from Facebook AI), released together with the paper "),Ji=r("a"),ew=n("Larger-Scale Transformers for Multilingual Masked Language Modeling"),tw=n(" by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau."),rw=d(),pa=r("li"),Tm=r("strong"),sh=r("a"),aw=n("XLNet"),lw=n(" (from Google/CMU) released with the paper "),Yi=r("a"),nw=n("\u200BXLNet: Generalized Autoregressive Pretraining for Language Understanding"),ow=n(" by Zhilin Yang"),_m=r("em"),iw=n(", Zihang Dai"),dw=n(", Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. 
Le."),sw=d(),bn=r("li"),mm=r("strong"),hh=r("a"),hw=n("XLSR-Wav2Vec2"),cw=n(" (from Facebook AI) released with the paper "),Ki=r("a"),gw=n("Unsupervised Cross-Lingual Representation Learning For Speech Recognition"),fw=n(" by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli."),uw=d(),Ln=r("li"),pm=r("strong"),ch=r("a"),vw=n("XLS-R"),Ew=n(" (from Facebook AI) released with the paper "),Vi=r("a"),Tw=n("XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale"),_w=n(" by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli."),mw=d(),Sn=r("li"),Dm=r("strong"),gh=r("a"),pw=n("YOSO"),Dw=n(" (from the University of Wisconsin - Madison) released with the paper "),Zi=r("a"),Aw=n("You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling"),Rw=n(" by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh."),Wp=d(),Sa=r("h3"),wn=r("a"),Am=r("span"),mp(Xi.$$.fragment),yw=d(),Rm=r("span"),bw=n("Frameworks aceitos"),Jp=d(),fh=r("p"),Lw=n(`A tabela abaixo representa a lista de suporte na biblioteca para cada um dos seguintes modelos, caso tenham um tokenizer do Python (chamado de \u201Cslow\u201D), ou um tokenizer constru\xEDdo em cima da biblioteca \u{1F917} Tokenizers (chamado de \u201Cfast\u201D). 
Al\xE9m disso, s\xE3o diferenciados pelo suporte em diferentes frameworks: JAX (por meio do Flax); PyTorch; e/ou Tensorflow.`),Yp=d(),Mn=r("table"),ym=r("thead"),m=r("tr"),uh=r("th"),Sw=n("Model"),ww=d(),vh=r("th"),Mw=n("Tokenizer slow"),Pw=d(),Eh=r("th"),Gw=n("Tokenizer fast"),Nw=d(),Th=r("th"),kw=n("PyTorch support"),Cw=d(),_h=r("th"),Iw=n("TensorFlow support"),Bw=d(),mh=r("th"),xw=n("Flax Support"),Ow=d(),g=r("tbody"),p=r("tr"),ph=r("td"),Fw=n("ALBERT"),Ww=d(),Dh=r("td"),Jw=n("\u2705"),Yw=d(),Ah=r("td"),Kw=n("\u2705"),Vw=d(),Rh=r("td"),Zw=n("\u2705"),Xw=d(),yh=r("td"),zw=n("\u2705"),Hw=d(),bh=r("td"),Uw=n("\u2705"),qw=d(),D=r("tr"),Lh=r("td"),jw=n("BART"),Qw=d(),Sh=r("td"),$w=n("\u2705"),eM=d(),wh=r("td"),tM=n("\u2705"),rM=d(),Mh=r("td"),aM=n("\u2705"),lM=d(),Ph=r("td"),nM=n("\u2705"),oM=d(),Gh=r("td"),iM=n("\u2705"),dM=d(),A=r("tr"),Nh=r("td"),sM=n("BEiT"),hM=d(),kh=r("td"),cM=n("\u274C"),gM=d(),Ch=r("td"),fM=n("\u274C"),uM=d(),Ih=r("td"),vM=n("\u2705"),EM=d(),Bh=r("td"),TM=n("\u274C"),_M=d(),xh=r("td"),mM=n("\u2705"),pM=d(),R=r("tr"),Oh=r("td"),DM=n("BERT"),AM=d(),Fh=r("td"),RM=n("\u2705"),yM=d(),Wh=r("td"),bM=n("\u2705"),LM=d(),Jh=r("td"),SM=n("\u2705"),wM=d(),Yh=r("td"),MM=n("\u2705"),PM=d(),Kh=r("td"),GM=n("\u2705"),NM=d(),y=r("tr"),Vh=r("td"),kM=n("Bert 
Generation"),CM=d(),Zh=r("td"),IM=n("\u2705"),BM=d(),Xh=r("td"),xM=n("\u274C"),OM=d(),zh=r("td"),FM=n("\u2705"),WM=d(),Hh=r("td"),JM=n("\u274C"),YM=d(),Uh=r("td"),KM=n("\u274C"),VM=d(),b=r("tr"),qh=r("td"),ZM=n("BigBird"),XM=d(),jh=r("td"),zM=n("\u2705"),HM=d(),Qh=r("td"),UM=n("\u2705"),qM=d(),$h=r("td"),jM=n("\u2705"),QM=d(),ec=r("td"),$M=n("\u274C"),eP=d(),tc=r("td"),tP=n("\u2705"),rP=d(),L=r("tr"),rc=r("td"),aP=n("BigBirdPegasus"),lP=d(),ac=r("td"),nP=n("\u274C"),oP=d(),lc=r("td"),iP=n("\u274C"),dP=d(),nc=r("td"),sP=n("\u2705"),hP=d(),oc=r("td"),cP=n("\u274C"),gP=d(),ic=r("td"),fP=n("\u274C"),uP=d(),S=r("tr"),dc=r("td"),vP=n("Blenderbot"),EP=d(),sc=r("td"),TP=n("\u2705"),_P=d(),hc=r("td"),mP=n("\u2705"),pP=d(),cc=r("td"),DP=n("\u2705"),AP=d(),gc=r("td"),RP=n("\u2705"),yP=d(),fc=r("td"),bP=n("\u2705"),LP=d(),w=r("tr"),uc=r("td"),SP=n("BlenderbotSmall"),wP=d(),vc=r("td"),MP=n("\u2705"),PP=d(),Ec=r("td"),GP=n("\u2705"),NP=d(),Tc=r("td"),kP=n("\u2705"),CP=d(),_c=r("td"),IP=n("\u2705"),BP=d(),mc=r("td"),xP=n("\u2705"),OP=d(),M=r("tr"),pc=r("td"),FP=n("CamemBERT"),WP=d(),Dc=r("td"),JP=n("\u2705"),YP=d(),Ac=r("td"),KP=n("\u2705"),VP=d(),Rc=r("td"),ZP=n("\u2705"),XP=d(),yc=r("td"),zP=n("\u2705"),HP=d(),bc=r("td"),UP=n("\u274C"),qP=d(),P=r("tr"),Lc=r("td"),jP=n("Canine"),QP=d(),Sc=r("td"),$P=n("\u2705"),eG=d(),wc=r("td"),tG=n("\u274C"),rG=d(),Mc=r("td"),aG=n("\u2705"),lG=d(),Pc=r("td"),nG=n("\u274C"),oG=d(),Gc=r("td"),iG=n("\u274C"),dG=d(),G=r("tr"),Nc=r("td"),sG=n("CLIP"),hG=d(),kc=r("td"),cG=n("\u2705"),gG=d(),Cc=r("td"),fG=n("\u2705"),uG=d(),Ic=r("td"),vG=n("\u2705"),EG=d(),Bc=r("td"),TG=n("\u2705"),_G=d(),xc=r("td"),mG=n("\u2705"),pG=d(),N=r("tr"),Oc=r("td"),DG=n("ConvBERT"),AG=d(),Fc=r("td"),RG=n("\u2705"),yG=d(),Wc=r("td"),bG=n("\u2705"),LG=d(),Jc=r("td"),SG=n("\u2705"),wG=d(),Yc=r("td"),MG=n("\u2705"),PG=d(),Kc=r("td"),GG=n("\u274C"),NG=d(),k=r("tr"),Vc=r("td"),kG=n("ConvNext"),CG=d(),Zc=r("td"),IG=n("\u274C"),BG=d(),Xc=r("td"),xG=n("\u274C"),OG=d(),zc=r("td"),FG=n
("\u2705"),WG=d(),Hc=r("td"),JG=n("\u2705"),YG=d(),Uc=r("td"),KG=n("\u274C"),VG=d(),C=r("tr"),qc=r("td"),ZG=n("CTRL"),XG=d(),jc=r("td"),zG=n("\u2705"),HG=d(),Qc=r("td"),UG=n("\u274C"),qG=d(),$c=r("td"),jG=n("\u2705"),QG=d(),eg=r("td"),$G=n("\u2705"),eN=d(),tg=r("td"),tN=n("\u274C"),rN=d(),I=r("tr"),rg=r("td"),aN=n("Data2VecAudio"),lN=d(),ag=r("td"),nN=n("\u274C"),oN=d(),lg=r("td"),iN=n("\u274C"),dN=d(),ng=r("td"),sN=n("\u2705"),hN=d(),og=r("td"),cN=n("\u274C"),gN=d(),ig=r("td"),fN=n("\u274C"),uN=d(),B=r("tr"),dg=r("td"),vN=n("Data2VecText"),EN=d(),sg=r("td"),TN=n("\u274C"),_N=d(),hg=r("td"),mN=n("\u274C"),pN=d(),cg=r("td"),DN=n("\u2705"),AN=d(),gg=r("td"),RN=n("\u274C"),yN=d(),fg=r("td"),bN=n("\u274C"),LN=d(),x=r("tr"),ug=r("td"),SN=n("Data2VecVision"),wN=d(),vg=r("td"),MN=n("\u274C"),PN=d(),Eg=r("td"),GN=n("\u274C"),NN=d(),Tg=r("td"),kN=n("\u2705"),CN=d(),_g=r("td"),IN=n("\u274C"),BN=d(),mg=r("td"),xN=n("\u274C"),ON=d(),O=r("tr"),pg=r("td"),FN=n("DeBERTa"),WN=d(),Dg=r("td"),JN=n("\u2705"),YN=d(),Ag=r("td"),KN=n("\u2705"),VN=d(),Rg=r("td"),ZN=n("\u2705"),XN=d(),yg=r("td"),zN=n("\u2705"),HN=d(),bg=r("td"),UN=n("\u274C"),qN=d(),F=r("tr"),Lg=r("td"),jN=n("DeBERTa-v2"),QN=d(),Sg=r("td"),$N=n("\u2705"),ek=d(),wg=r("td"),tk=n("\u2705"),rk=d(),Mg=r("td"),ak=n("\u2705"),lk=d(),Pg=r("td"),nk=n("\u2705"),ok=d(),Gg=r("td"),ik=n("\u274C"),dk=d(),W=r("tr"),Ng=r("td"),sk=n("Decision 
Transformer"),hk=d(),kg=r("td"),ck=n("\u274C"),gk=d(),Cg=r("td"),fk=n("\u274C"),uk=d(),Ig=r("td"),vk=n("\u2705"),Ek=d(),Bg=r("td"),Tk=n("\u274C"),_k=d(),xg=r("td"),mk=n("\u274C"),pk=d(),J=r("tr"),Og=r("td"),Dk=n("DeiT"),Ak=d(),Fg=r("td"),Rk=n("\u274C"),yk=d(),Wg=r("td"),bk=n("\u274C"),Lk=d(),Jg=r("td"),Sk=n("\u2705"),wk=d(),Yg=r("td"),Mk=n("\u274C"),Pk=d(),Kg=r("td"),Gk=n("\u274C"),Nk=d(),Y=r("tr"),Vg=r("td"),kk=n("DETR"),Ck=d(),Zg=r("td"),Ik=n("\u274C"),Bk=d(),Xg=r("td"),xk=n("\u274C"),Ok=d(),zg=r("td"),Fk=n("\u2705"),Wk=d(),Hg=r("td"),Jk=n("\u274C"),Yk=d(),Ug=r("td"),Kk=n("\u274C"),Vk=d(),K=r("tr"),qg=r("td"),Zk=n("DistilBERT"),Xk=d(),jg=r("td"),zk=n("\u2705"),Hk=d(),Qg=r("td"),Uk=n("\u2705"),qk=d(),$g=r("td"),jk=n("\u2705"),Qk=d(),e1=r("td"),$k=n("\u2705"),eC=d(),t1=r("td"),tC=n("\u2705"),rC=d(),V=r("tr"),r1=r("td"),aC=n("DPR"),lC=d(),a1=r("td"),nC=n("\u2705"),oC=d(),l1=r("td"),iC=n("\u2705"),dC=d(),n1=r("td"),sC=n("\u2705"),hC=d(),o1=r("td"),cC=n("\u2705"),gC=d(),i1=r("td"),fC=n("\u274C"),uC=d(),Z=r("tr"),d1=r("td"),vC=n("DPT"),EC=d(),s1=r("td"),TC=n("\u274C"),_C=d(),h1=r("td"),mC=n("\u274C"),pC=d(),c1=r("td"),DC=n("\u2705"),AC=d(),g1=r("td"),RC=n("\u274C"),yC=d(),f1=r("td"),bC=n("\u274C"),LC=d(),X=r("tr"),u1=r("td"),SC=n("ELECTRA"),wC=d(),v1=r("td"),MC=n("\u2705"),PC=d(),E1=r("td"),GC=n("\u2705"),NC=d(),T1=r("td"),kC=n("\u2705"),CC=d(),_1=r("td"),IC=n("\u2705"),BC=d(),m1=r("td"),xC=n("\u2705"),OC=d(),z=r("tr"),p1=r("td"),FC=n("Encoder decoder"),WC=d(),D1=r("td"),JC=n("\u274C"),YC=d(),A1=r("td"),KC=n("\u274C"),VC=d(),R1=r("td"),ZC=n("\u2705"),XC=d(),y1=r("td"),zC=n("\u2705"),HC=d(),b1=r("td"),UC=n("\u2705"),qC=d(),H=r("tr"),L1=r("td"),jC=n("FairSeq 
Machine-Translation"),QC=d(),S1=r("td"),$C=n("\u2705"),eI=d(),w1=r("td"),tI=n("\u274C"),rI=d(),M1=r("td"),aI=n("\u2705"),lI=d(),P1=r("td"),nI=n("\u274C"),oI=d(),G1=r("td"),iI=n("\u274C"),dI=d(),U=r("tr"),N1=r("td"),sI=n("FlauBERT"),hI=d(),k1=r("td"),cI=n("\u2705"),gI=d(),C1=r("td"),fI=n("\u274C"),uI=d(),I1=r("td"),vI=n("\u2705"),EI=d(),B1=r("td"),TI=n("\u2705"),_I=d(),x1=r("td"),mI=n("\u274C"),pI=d(),q=r("tr"),O1=r("td"),DI=n("FNet"),AI=d(),F1=r("td"),RI=n("\u2705"),yI=d(),W1=r("td"),bI=n("\u2705"),LI=d(),J1=r("td"),SI=n("\u2705"),wI=d(),Y1=r("td"),MI=n("\u274C"),PI=d(),K1=r("td"),GI=n("\u274C"),NI=d(),j=r("tr"),V1=r("td"),kI=n("Funnel Transformer"),CI=d(),Z1=r("td"),II=n("\u2705"),BI=d(),X1=r("td"),xI=n("\u2705"),OI=d(),z1=r("td"),FI=n("\u2705"),WI=d(),H1=r("td"),JI=n("\u2705"),YI=d(),U1=r("td"),KI=n("\u274C"),VI=d(),Q=r("tr"),q1=r("td"),ZI=n("GLPN"),XI=d(),j1=r("td"),zI=n("\u274C"),HI=d(),Q1=r("td"),UI=n("\u274C"),qI=d(),$1=r("td"),jI=n("\u2705"),QI=d(),ef=r("td"),$I=n("\u274C"),eB=d(),tf=r("td"),tB=n("\u274C"),rB=d(),$=r("tr"),rf=r("td"),aB=n("GPT 
Neo"),lB=d(),af=r("td"),nB=n("\u274C"),oB=d(),lf=r("td"),iB=n("\u274C"),dB=d(),nf=r("td"),sB=n("\u2705"),hB=d(),of=r("td"),cB=n("\u274C"),gB=d(),df=r("td"),fB=n("\u2705"),uB=d(),ee=r("tr"),sf=r("td"),vB=n("GPT-J"),EB=d(),hf=r("td"),TB=n("\u274C"),_B=d(),cf=r("td"),mB=n("\u274C"),pB=d(),gf=r("td"),DB=n("\u2705"),AB=d(),ff=r("td"),RB=n("\u2705"),yB=d(),uf=r("td"),bB=n("\u2705"),LB=d(),te=r("tr"),vf=r("td"),SB=n("Hubert"),wB=d(),Ef=r("td"),MB=n("\u274C"),PB=d(),Tf=r("td"),GB=n("\u274C"),NB=d(),_f=r("td"),kB=n("\u2705"),CB=d(),mf=r("td"),IB=n("\u2705"),BB=d(),pf=r("td"),xB=n("\u274C"),OB=d(),re=r("tr"),Df=r("td"),FB=n("I-BERT"),WB=d(),Af=r("td"),JB=n("\u274C"),YB=d(),Rf=r("td"),KB=n("\u274C"),VB=d(),yf=r("td"),ZB=n("\u2705"),XB=d(),bf=r("td"),zB=n("\u274C"),HB=d(),Lf=r("td"),UB=n("\u274C"),qB=d(),ae=r("tr"),Sf=r("td"),jB=n("ImageGPT"),QB=d(),wf=r("td"),$B=n("\u274C"),ex=d(),Mf=r("td"),tx=n("\u274C"),rx=d(),Pf=r("td"),ax=n("\u2705"),lx=d(),Gf=r("td"),nx=n("\u274C"),ox=d(),Nf=r("td"),ix=n("\u274C"),dx=d(),le=r("tr"),kf=r("td"),sx=n("LayoutLM"),hx=d(),Cf=r("td"),cx=n("\u2705"),gx=d(),If=r("td"),fx=n("\u2705"),ux=d(),Bf=r("td"),vx=n("\u2705"),Ex=d(),xf=r("td"),Tx=n("\u2705"),_x=d(),Of=r("td"),mx=n("\u274C"),px=d(),ne=r("tr"),Ff=r("td"),Dx=n("LayoutLMv2"),Ax=d(),Wf=r("td"),Rx=n("\u2705"),yx=d(),Jf=r("td"),bx=n("\u2705"),Lx=d(),Yf=r("td"),Sx=n("\u2705"),wx=d(),Kf=r("td"),Mx=n("\u274C"),Px=d(),Vf=r("td"),Gx=n("\u274C"),Nx=d(),oe=r("tr"),Zf=r("td"),kx=n("LED"),Cx=d(),Xf=r("td"),Ix=n("\u2705"),Bx=d(),zf=r("td"),xx=n("\u2705"),Ox=d(),Hf=r("td"),Fx=n("\u2705"),Wx=d(),Uf=r("td"),Jx=n("\u2705"),Yx=d(),qf=r("td"),Kx=n("\u274C"),Vx=d(),ie=r("tr"),jf=r("td"),Zx=n("Longformer"),Xx=d(),Qf=r("td"),zx=n("\u2705"),Hx=d(),$f=r("td"),Ux=n("\u2705"),qx=d(),eu=r("td"),jx=n("\u2705"),Qx=d(),tu=r("td"),$x=n("\u2705"),eO=d(),ru=r("td"),tO=n("\u274C"),rO=d(),de=r("tr"),au=r("td"),aO=n("LUKE"),lO=d(),lu=r("td"),nO=n("\u2705"),oO=d(),nu=r("td"),iO=n("\u274C"),dO=d(),ou=r("td"),sO=n("\u2705"),hO=d(),i
u=r("td"),cO=n("\u274C"),gO=d(),du=r("td"),fO=n("\u274C"),uO=d(),se=r("tr"),su=r("td"),vO=n("LXMERT"),EO=d(),hu=r("td"),TO=n("\u2705"),_O=d(),cu=r("td"),mO=n("\u2705"),pO=d(),gu=r("td"),DO=n("\u2705"),AO=d(),fu=r("td"),RO=n("\u2705"),yO=d(),uu=r("td"),bO=n("\u274C"),LO=d(),he=r("tr"),vu=r("td"),SO=n("M2M100"),wO=d(),Eu=r("td"),MO=n("\u2705"),PO=d(),Tu=r("td"),GO=n("\u274C"),NO=d(),_u=r("td"),kO=n("\u2705"),CO=d(),mu=r("td"),IO=n("\u274C"),BO=d(),pu=r("td"),xO=n("\u274C"),OO=d(),ce=r("tr"),Du=r("td"),FO=n("Marian"),WO=d(),Au=r("td"),JO=n("\u2705"),YO=d(),Ru=r("td"),KO=n("\u274C"),VO=d(),yu=r("td"),ZO=n("\u2705"),XO=d(),bu=r("td"),zO=n("\u2705"),HO=d(),Lu=r("td"),UO=n("\u2705"),qO=d(),ge=r("tr"),Su=r("td"),jO=n("MaskFormer"),QO=d(),wu=r("td"),$O=n("\u274C"),eF=d(),Mu=r("td"),tF=n("\u274C"),rF=d(),Pu=r("td"),aF=n("\u2705"),lF=d(),Gu=r("td"),nF=n("\u274C"),oF=d(),Nu=r("td"),iF=n("\u274C"),dF=d(),fe=r("tr"),ku=r("td"),sF=n("mBART"),hF=d(),Cu=r("td"),cF=n("\u2705"),gF=d(),Iu=r("td"),fF=n("\u2705"),uF=d(),Bu=r("td"),vF=n("\u2705"),EF=d(),xu=r("td"),TF=n("\u2705"),_F=d(),Ou=r("td"),mF=n("\u2705"),pF=d(),ue=r("tr"),Fu=r("td"),DF=n("MegatronBert"),AF=d(),Wu=r("td"),RF=n("\u274C"),yF=d(),Ju=r("td"),bF=n("\u274C"),LF=d(),Yu=r("td"),SF=n("\u2705"),wF=d(),Ku=r("td"),MF=n("\u274C"),PF=d(),Vu=r("td"),GF=n("\u274C"),NF=d(),ve=r("tr"),Zu=r("td"),kF=n("MobileBERT"),CF=d(),Xu=r("td"),IF=n("\u2705"),BF=d(),zu=r("td"),xF=n("\u2705"),OF=d(),Hu=r("td"),FF=n("\u2705"),WF=d(),Uu=r("td"),JF=n("\u2705"),YF=d(),qu=r("td"),KF=n("\u274C"),VF=d(),Ee=r("tr"),ju=r("td"),ZF=n("MPNet"),XF=d(),Qu=r("td"),zF=n("\u2705"),HF=d(),$u=r("td"),UF=n("\u2705"),qF=d(),ev=r("td"),jF=n("\u2705"),QF=d(),tv=r("td"),$F=n("\u2705"),eW=d(),rv=r("td"),tW=n("\u274C"),rW=d(),Te=r("tr"),av=r("td"),aW=n("mT5"),lW=d(),lv=r("td"),nW=n("\u2705"),oW=d(),nv=r("td"),iW=n("\u2705"),dW=d(),ov=r("td"),sW=n("\u2705"),hW=d(),iv=r("td"),cW=n("\u2705"),gW=d(),dv=r("td"),fW=n("\u2705"),uW=d(),_e=r("tr"),sv=r("td"),vW=n("Nystromformer"),E
W=d(),hv=r("td"),TW=n("\u274C"),_W=d(),cv=r("td"),mW=n("\u274C"),pW=d(),gv=r("td"),DW=n("\u2705"),AW=d(),fv=r("td"),RW=n("\u274C"),yW=d(),uv=r("td"),bW=n("\u274C"),LW=d(),me=r("tr"),vv=r("td"),SW=n("OpenAI GPT"),wW=d(),Ev=r("td"),MW=n("\u2705"),PW=d(),Tv=r("td"),GW=n("\u2705"),NW=d(),_v=r("td"),kW=n("\u2705"),CW=d(),mv=r("td"),IW=n("\u2705"),BW=d(),pv=r("td"),xW=n("\u274C"),OW=d(),pe=r("tr"),Dv=r("td"),FW=n("OpenAI GPT-2"),WW=d(),Av=r("td"),JW=n("\u2705"),YW=d(),Rv=r("td"),KW=n("\u2705"),VW=d(),yv=r("td"),ZW=n("\u2705"),XW=d(),bv=r("td"),zW=n("\u2705"),HW=d(),Lv=r("td"),UW=n("\u2705"),qW=d(),De=r("tr"),Sv=r("td"),jW=n("Pegasus"),QW=d(),wv=r("td"),$W=n("\u2705"),eJ=d(),Mv=r("td"),tJ=n("\u2705"),rJ=d(),Pv=r("td"),aJ=n("\u2705"),lJ=d(),Gv=r("td"),nJ=n("\u2705"),oJ=d(),Nv=r("td"),iJ=n("\u2705"),dJ=d(),Ae=r("tr"),kv=r("td"),sJ=n("Perceiver"),hJ=d(),Cv=r("td"),cJ=n("\u2705"),gJ=d(),Iv=r("td"),fJ=n("\u274C"),uJ=d(),Bv=r("td"),vJ=n("\u2705"),EJ=d(),xv=r("td"),TJ=n("\u274C"),_J=d(),Ov=r("td"),mJ=n("\u274C"),pJ=d(),Re=r("tr"),Fv=r("td"),DJ=n("PLBart"),AJ=d(),Wv=r("td"),RJ=n("\u2705"),yJ=d(),Jv=r("td"),bJ=n("\u274C"),LJ=d(),Yv=r("td"),SJ=n("\u2705"),wJ=d(),Kv=r("td"),MJ=n("\u274C"),PJ=d(),Vv=r("td"),GJ=n("\u274C"),NJ=d(),ye=r("tr"),Zv=r("td"),kJ=n("PoolFormer"),CJ=d(),Xv=r("td"),IJ=n("\u274C"),BJ=d(),zv=r("td"),xJ=n("\u274C"),OJ=d(),Hv=r("td"),FJ=n("\u2705"),WJ=d(),Uv=r("td"),JJ=n("\u274C"),YJ=d(),qv=r("td"),KJ=n("\u274C"),VJ=d(),be=r("tr"),jv=r("td"),ZJ=n("ProphetNet"),XJ=d(),Qv=r("td"),zJ=n("\u2705"),HJ=d(),$v=r("td"),UJ=n("\u274C"),qJ=d(),eE=r("td"),jJ=n("\u2705"),QJ=d(),tE=r("td"),$J=n("\u274C"),eY=d(),rE=r("td"),tY=n("\u274C"),rY=d(),Le=r("tr"),aE=r("td"),aY=n("QDQBert"),lY=d(),lE=r("td"),nY=n("\u274C"),oY=d(),nE=r("td"),iY=n("\u274C"),dY=d(),oE=r("td"),sY=n("\u2705"),hY=d(),iE=r("td"),cY=n("\u274C"),gY=d(),dE=r("td"),fY=n("\u274C"),uY=d(),Se=r("tr"),sE=r("td"),vY=n("RAG"),EY=d(),hE=r("td"),TY=n("\u2705"),_Y=d(),cE=r("td"),mY=n("\u274C"),pY=d(),gE=r("td"),DY=n("\u2705"),A
Y=d(),fE=r("td"),RY=n("\u2705"),yY=d(),uE=r("td"),bY=n("\u274C"),LY=d(),we=r("tr"),vE=r("td"),SY=n("Realm"),wY=d(),EE=r("td"),MY=n("\u2705"),PY=d(),TE=r("td"),GY=n("\u2705"),NY=d(),_E=r("td"),kY=n("\u2705"),CY=d(),mE=r("td"),IY=n("\u274C"),BY=d(),pE=r("td"),xY=n("\u274C"),OY=d(),Me=r("tr"),DE=r("td"),FY=n("Reformer"),WY=d(),AE=r("td"),JY=n("\u2705"),YY=d(),RE=r("td"),KY=n("\u2705"),VY=d(),yE=r("td"),ZY=n("\u2705"),XY=d(),bE=r("td"),zY=n("\u274C"),HY=d(),LE=r("td"),UY=n("\u274C"),qY=d(),Pe=r("tr"),SE=r("td"),jY=n("RegNet"),QY=d(),wE=r("td"),$Y=n("\u274C"),eK=d(),ME=r("td"),tK=n("\u274C"),rK=d(),PE=r("td"),aK=n("\u2705"),lK=d(),GE=r("td"),nK=n("\u274C"),oK=d(),NE=r("td"),iK=n("\u274C"),dK=d(),Ge=r("tr"),kE=r("td"),sK=n("RemBERT"),hK=d(),CE=r("td"),cK=n("\u2705"),gK=d(),IE=r("td"),fK=n("\u2705"),uK=d(),BE=r("td"),vK=n("\u2705"),EK=d(),xE=r("td"),TK=n("\u2705"),_K=d(),OE=r("td"),mK=n("\u274C"),pK=d(),Ne=r("tr"),FE=r("td"),DK=n("ResNet"),AK=d(),WE=r("td"),RK=n("\u274C"),yK=d(),JE=r("td"),bK=n("\u274C"),LK=d(),YE=r("td"),SK=n("\u2705"),wK=d(),KE=r("td"),MK=n("\u274C"),PK=d(),VE=r("td"),GK=n("\u274C"),NK=d(),ke=r("tr"),ZE=r("td"),kK=n("RetriBERT"),CK=d(),XE=r("td"),IK=n("\u2705"),BK=d(),zE=r("td"),xK=n("\u2705"),OK=d(),HE=r("td"),FK=n("\u2705"),WK=d(),UE=r("td"),JK=n("\u274C"),YK=d(),qE=r("td"),KK=n("\u274C"),VK=d(),Ce=r("tr"),jE=r("td"),ZK=n("RoBERTa"),XK=d(),QE=r("td"),zK=n("\u2705"),HK=d(),$E=r("td"),UK=n("\u2705"),qK=d(),e2=r("td"),jK=n("\u2705"),QK=d(),t2=r("td"),$K=n("\u2705"),eV=d(),r2=r("td"),tV=n("\u2705"),rV=d(),Ie=r("tr"),a2=r("td"),aV=n("RoFormer"),lV=d(),l2=r("td"),nV=n("\u2705"),oV=d(),n2=r("td"),iV=n("\u2705"),dV=d(),o2=r("td"),sV=n("\u2705"),hV=d(),i2=r("td"),cV=n("\u2705"),gV=d(),d2=r("td"),fV=n("\u2705"),uV=d(),Be=r("tr"),s2=r("td"),vV=n("SegFormer"),EV=d(),h2=r("td"),TV=n("\u274C"),_V=d(),c2=r("td"),mV=n("\u274C"),pV=d(),g2=r("td"),DV=n("\u2705"),AV=d(),f2=r("td"),RV=n("\u274C"),yV=d(),u2=r("td"),bV=n("\u274C"),LV=d(),xe=r("tr"),v2=r("td"),SV=n("SEW"),wV
=d(),E2=r("td"),MV=n("\u274C"),PV=d(),T2=r("td"),GV=n("\u274C"),NV=d(),_2=r("td"),kV=n("\u2705"),CV=d(),m2=r("td"),IV=n("\u274C"),BV=d(),p2=r("td"),xV=n("\u274C"),OV=d(),Oe=r("tr"),D2=r("td"),FV=n("SEW-D"),WV=d(),A2=r("td"),JV=n("\u274C"),YV=d(),R2=r("td"),KV=n("\u274C"),VV=d(),y2=r("td"),ZV=n("\u2705"),XV=d(),b2=r("td"),zV=n("\u274C"),HV=d(),L2=r("td"),UV=n("\u274C"),qV=d(),Fe=r("tr"),S2=r("td"),jV=n("Speech Encoder decoder"),QV=d(),w2=r("td"),$V=n("\u274C"),eZ=d(),M2=r("td"),tZ=n("\u274C"),rZ=d(),P2=r("td"),aZ=n("\u2705"),lZ=d(),G2=r("td"),nZ=n("\u274C"),oZ=d(),N2=r("td"),iZ=n("\u2705"),dZ=d(),We=r("tr"),k2=r("td"),sZ=n("Speech2Text"),hZ=d(),C2=r("td"),cZ=n("\u2705"),gZ=d(),I2=r("td"),fZ=n("\u274C"),uZ=d(),B2=r("td"),vZ=n("\u2705"),EZ=d(),x2=r("td"),TZ=n("\u2705"),_Z=d(),O2=r("td"),mZ=n("\u274C"),pZ=d(),Je=r("tr"),F2=r("td"),DZ=n("Speech2Text2"),AZ=d(),W2=r("td"),RZ=n("\u2705"),yZ=d(),J2=r("td"),bZ=n("\u274C"),LZ=d(),Y2=r("td"),SZ=n("\u274C"),wZ=d(),K2=r("td"),MZ=n("\u274C"),PZ=d(),V2=r("td"),GZ=n("\u274C"),NZ=d(),Ye=r("tr"),Z2=r("td"),kZ=n("Splinter"),CZ=d(),X2=r("td"),IZ=n("\u2705"),BZ=d(),z2=r("td"),xZ=n("\u2705"),OZ=d(),H2=r("td"),FZ=n("\u2705"),WZ=d(),U2=r("td"),JZ=n("\u274C"),YZ=d(),q2=r("td"),KZ=n("\u274C"),VZ=d(),Ke=r("tr"),j2=r("td"),ZZ=n("SqueezeBERT"),XZ=d(),Q2=r("td"),zZ=n("\u2705"),HZ=d(),$2=r("td"),UZ=n("\u2705"),qZ=d(),eT=r("td"),jZ=n("\u2705"),QZ=d(),tT=r("td"),$Z=n("\u274C"),eX=d(),rT=r("td"),tX=n("\u274C"),rX=d(),Ve=r("tr"),aT=r("td"),aX=n("Swin"),lX=d(),lT=r("td"),nX=n("\u274C"),oX=d(),nT=r("td"),iX=n("\u274C"),dX=d(),oT=r("td"),sX=n("\u2705"),hX=d(),iT=r("td"),cX=n("\u274C"),gX=d(),dT=r("td"),fX=n("\u274C"),uX=d(),Ze=r("tr"),sT=r("td"),vX=n("T5"),EX=d(),hT=r("td"),TX=n("\u2705"),_X=d(),cT=r("td"),mX=n("\u2705"),pX=d(),gT=r("td"),DX=n("\u2705"),AX=d(),fT=r("td"),RX=n("\u2705"),yX=d(),uT=r("td"),bX=n("\u2705"),LX=d(),Xe=r("tr"),vT=r("td"),SX=n("TAPAS"),wX=d(),ET=r("td"),MX=n("\u2705"),PX=d(),TT=r("td"),GX=n("\u274C"),NX=d(),_T=r("td"),kX=n("\u270
5"),CX=d(),mT=r("td"),IX=n("\u2705"),BX=d(),pT=r("td"),xX=n("\u274C"),OX=d(),ze=r("tr"),DT=r("td"),FX=n("TAPEX"),WX=d(),AT=r("td"),JX=n("\u2705"),YX=d(),RT=r("td"),KX=n("\u2705"),VX=d(),yT=r("td"),ZX=n("\u2705"),XX=d(),bT=r("td"),zX=n("\u2705"),HX=d(),LT=r("td"),UX=n("\u2705"),qX=d(),He=r("tr"),ST=r("td"),jX=n("Transformer-XL"),QX=d(),wT=r("td"),$X=n("\u2705"),ez=d(),MT=r("td"),tz=n("\u274C"),rz=d(),PT=r("td"),az=n("\u2705"),lz=d(),GT=r("td"),nz=n("\u2705"),oz=d(),NT=r("td"),iz=n("\u274C"),dz=d(),Ue=r("tr"),kT=r("td"),sz=n("TrOCR"),hz=d(),CT=r("td"),cz=n("\u274C"),gz=d(),IT=r("td"),fz=n("\u274C"),uz=d(),BT=r("td"),vz=n("\u2705"),Ez=d(),xT=r("td"),Tz=n("\u274C"),_z=d(),OT=r("td"),mz=n("\u274C"),pz=d(),qe=r("tr"),FT=r("td"),Dz=n("UniSpeech"),Az=d(),WT=r("td"),Rz=n("\u274C"),yz=d(),JT=r("td"),bz=n("\u274C"),Lz=d(),YT=r("td"),Sz=n("\u2705"),wz=d(),KT=r("td"),Mz=n("\u274C"),Pz=d(),VT=r("td"),Gz=n("\u274C"),Nz=d(),je=r("tr"),ZT=r("td"),kz=n("UniSpeechSat"),Cz=d(),XT=r("td"),Iz=n("\u274C"),Bz=d(),zT=r("td"),xz=n("\u274C"),Oz=d(),HT=r("td"),Fz=n("\u2705"),Wz=d(),UT=r("td"),Jz=n("\u274C"),Yz=d(),qT=r("td"),Kz=n("\u274C"),Vz=d(),Qe=r("tr"),jT=r("td"),Zz=n("VAN"),Xz=d(),QT=r("td"),zz=n("\u274C"),Hz=d(),$T=r("td"),Uz=n("\u274C"),qz=d(),e_=r("td"),jz=n("\u2705"),Qz=d(),t_=r("td"),$z=n("\u274C"),eH=d(),r_=r("td"),tH=n("\u274C"),rH=d(),$e=r("tr"),a_=r("td"),aH=n("ViLT"),lH=d(),l_=r("td"),nH=n("\u274C"),oH=d(),n_=r("td"),iH=n("\u274C"),dH=d(),o_=r("td"),sH=n("\u2705"),hH=d(),i_=r("td"),cH=n("\u274C"),gH=d(),d_=r("td"),fH=n("\u274C"),uH=d(),et=r("tr"),s_=r("td"),vH=n("Vision Encoder 
decoder"),EH=d(),h_=r("td"),TH=n("\u274C"),_H=d(),c_=r("td"),mH=n("\u274C"),pH=d(),g_=r("td"),DH=n("\u2705"),AH=d(),f_=r("td"),RH=n("\u2705"),yH=d(),u_=r("td"),bH=n("\u2705"),LH=d(),tt=r("tr"),v_=r("td"),SH=n("VisionTextDualEncoder"),wH=d(),E_=r("td"),MH=n("\u274C"),PH=d(),T_=r("td"),GH=n("\u274C"),NH=d(),__=r("td"),kH=n("\u2705"),CH=d(),m_=r("td"),IH=n("\u274C"),BH=d(),p_=r("td"),xH=n("\u2705"),OH=d(),rt=r("tr"),D_=r("td"),FH=n("VisualBert"),WH=d(),A_=r("td"),JH=n("\u274C"),YH=d(),R_=r("td"),KH=n("\u274C"),VH=d(),y_=r("td"),ZH=n("\u2705"),XH=d(),b_=r("td"),zH=n("\u274C"),HH=d(),L_=r("td"),UH=n("\u274C"),qH=d(),at=r("tr"),S_=r("td"),jH=n("ViT"),QH=d(),w_=r("td"),$H=n("\u274C"),eU=d(),M_=r("td"),tU=n("\u274C"),rU=d(),P_=r("td"),aU=n("\u2705"),lU=d(),G_=r("td"),nU=n("\u2705"),oU=d(),N_=r("td"),iU=n("\u2705"),dU=d(),lt=r("tr"),k_=r("td"),sU=n("ViTMAE"),hU=d(),C_=r("td"),cU=n("\u274C"),gU=d(),I_=r("td"),fU=n("\u274C"),uU=d(),B_=r("td"),vU=n("\u2705"),EU=d(),x_=r("td"),TU=n("\u2705"),_U=d(),O_=r("td"),mU=n("\u274C"),pU=d(),nt=r("tr"),F_=r("td"),DU=n("Wav2Vec2"),AU=d(),W_=r("td"),RU=n("\u2705"),yU=d(),J_=r("td"),bU=n("\u274C"),LU=d(),Y_=r("td"),SU=n("\u2705"),wU=d(),K_=r("td"),MU=n("\u2705"),PU=d(),V_=r("td"),GU=n("\u2705"),NU=d(),ot=r("tr"),Z_=r("td"),kU=n("WavLM"),CU=d(),X_=r("td"),IU=n("\u274C"),BU=d(),z_=r("td"),xU=n("\u274C"),OU=d(),H_=r("td"),FU=n("\u2705"),WU=d(),U_=r("td"),JU=n("\u274C"),YU=d(),q_=r("td"),KU=n("\u274C"),VU=d(),it=r("tr"),j_=r("td"),ZU=n("XGLM"),XU=d(),Q_=r("td"),zU=n("\u2705"),HU=d(),$_=r("td"),UU=n("\u2705"),qU=d(),e3=r("td"),jU=n("\u2705"),QU=d(),t3=r("td"),$U=n("\u274C"),eq=d(),r3=r("td"),tq=n("\u2705"),rq=d(),dt=r("tr"),a3=r("td"),aq=n("XLM"),lq=d(),l3=r("td"),nq=n("\u2705"),oq=d(),n3=r("td"),iq=n("\u274C"),dq=d(),o3=r("td"),sq=n("\u2705"),hq=d(),i3=r("td"),cq=n("\u2705"),gq=d(),d3=r("td"),fq=n("\u274C"),uq=d(),st=r("tr"),s3=r("td"),vq=n("XLM-RoBERTa"),Eq=d(),h3=r("td"),Tq=n("\u2705"),_q=d(),c3=r("td"),mq=n("\u2705"),pq=d(),g3=r("td"),Dq=n("\u
2705"),Aq=d(),f3=r("td"),Rq=n("\u2705"),yq=d(),u3=r("td"),bq=n("\u2705"),Lq=d(),ht=r("tr"),v3=r("td"),Sq=n("XLM-RoBERTa-XL"),wq=d(),E3=r("td"),Mq=n("\u274C"),Pq=d(),T3=r("td"),Gq=n("\u274C"),Nq=d(),_3=r("td"),kq=n("\u2705"),Cq=d(),m3=r("td"),Iq=n("\u274C"),Bq=d(),p3=r("td"),xq=n("\u274C"),Oq=d(),ct=r("tr"),D3=r("td"),Fq=n("XLMProphetNet"),Wq=d(),A3=r("td"),Jq=n("\u2705"),Yq=d(),R3=r("td"),Kq=n("\u274C"),Vq=d(),y3=r("td"),Zq=n("\u2705"),Xq=d(),b3=r("td"),zq=n("\u274C"),Hq=d(),L3=r("td"),Uq=n("\u274C"),qq=d(),gt=r("tr"),S3=r("td"),jq=n("XLNet"),Qq=d(),w3=r("td"),$q=n("\u2705"),ej=d(),M3=r("td"),tj=n("\u2705"),rj=d(),P3=r("td"),aj=n("\u2705"),lj=d(),G3=r("td"),nj=n("\u2705"),oj=d(),N3=r("td"),ij=n("\u274C"),dj=d(),ft=r("tr"),k3=r("td"),sj=n("YOSO"),hj=d(),C3=r("td"),cj=n("\u274C"),gj=d(),I3=r("td"),fj=n("\u274C"),uj=d(),B3=r("td"),vj=n("\u2705"),Ej=d(),x3=r("td"),Tj=n("\u274C"),_j=d(),O3=r("td"),mj=n("\u274C"),this.h()},l(u){const v=cve('[data-svelte="svelte-1phssyn"]',document.head);Da=a(v,"META",{name:!0,content:!0}),v.forEach(t),Lp=s(u),Aa=a(u,"H1",{class:!0});var Vp=l(Aa);Ma=a(Vp,"A",{id:!0,class:!0,href:!0});var Gj=l(Ma);F3=a(Gj,"SPAN",{});var Nj=l(F3);pp(Nn.$$.fragment,Nj),Nj.forEach(t),Gj.forEach(t),jp=s(Vp),W3=a(Vp,"SPAN",{});var kj=l(W3);Qp=o(kj,"\u{1F917} Transformers"),kj.forEach(t),Vp.forEach(t),Sp=s(u),$i=a(u,"P",{});var Cj=l($i);$p=o(Cj,`Estado da Arte para Aprendizado de M\xE1quina em PyTorch, TensorFlow e JAX. O \u{1F917} Transformers disponibiliza APIs para facilmente baixar e treinar modelos pr\xE9-treinados de \xFAltima gera\xE7\xE3o. O uso de modelos pr\xE9-treinados pode diminuir os seus custos de computa\xE7\xE3o, a sua pegada de carbono, al\xE9m de economizar o tempo necess\xE1rio para se treinar um modelo do zero. 
Os modelos podem ser usados para diversas tarefas:`),Cj.forEach(t),wp=s(u),vt=a(u,"UL",{});var Pn=l(vt);J3=a(Pn,"LI",{});var Ij=l(J3);e6=o(Ij,"\u{1F4DD} Textos: classifica\xE7\xE3o, extra\xE7\xE3o de informa\xE7\xF5es, perguntas e respostas, resumir, traduzir e gerar textos em mais de 100 idiomas."),Ij.forEach(t),t6=s(Pn),Y3=a(Pn,"LI",{});var Bj=l(Y3);r6=o(Bj,"\u{1F5BC} Imagens: classifica\xE7\xE3o, dete\xE7\xE3o de objetos, e segmenta\xE7\xE3o."),Bj.forEach(t),a6=s(Pn),K3=a(Pn,"LI",{});var xj=l(K3);l6=o(xj,"\u{1F5E3} Audio: reconhecimento de fala e classifica\xE7\xE3o de \xE1udio."),xj.forEach(t),n6=s(Pn),V3=a(Pn,"LI",{});var Oj=l(V3);o6=o(Oj,`\u{1F419} Multimodal: perguntas tabeladas e respsostas, reconhecimento \xF3tico de charact\xE9res, extra\xE7\xE3o de informa\xE7\xE3o de documentos escaneados, classifica\xE7\xE3o de v\xEDdeo, perguntas e respostas visuais.`),Oj.forEach(t),Pn.forEach(t),Mp=s(u),Et=a(u,"P",{});var Gn=l(Et);i6=o(Gn,`Nossa biblioteca aceita integra\xE7\xE3o cont\xEDnua entre tr\xEAs das bibliotecas mais populares de aprendizado profundo: Our library supports seamless integration between three of the most popular deep learning libraries: `),kn=a(Gn,"A",{href:!0,rel:!0});var Fj=l(kn);d6=o(Fj,"PyTorch"),Fj.forEach(t),s6=o(Gn,", "),Cn=a(Gn,"A",{href:!0,rel:!0});var Wj=l(Cn);h6=o(Wj,"TensorFlow"),Wj.forEach(t),c6=o(Gn," e "),In=a(Gn,"A",{href:!0,rel:!0});var Jj=l(In);g6=o(Jj,"JAX"),Jj.forEach(t),f6=o(Gn,`. 
Treine seu modelo em tr\xEAs linhas de c\xF3digo em um framework, e carregue-o para execu\xE7\xE3o em outro.`),Gn.forEach(t),Pp=s(u),ed=a(u,"P",{});var Yj=l(ed);u6=o(Yj,"Cada arquitetura \u{1F917} Transformers \xE9 definida em um m\xF3dulo individual do Python, para que seja facilmente customiz\xE1vel para pesquisa e experimentos."),Yj.forEach(t),Gp=s(u),Ra=a(u,"H2",{class:!0});var Zp=l(Ra);Pa=a(Zp,"A",{id:!0,class:!0,href:!0});var Kj=l(Pa);Z3=a(Kj,"SPAN",{});var Vj=l(Z3);pp(Bn.$$.fragment,Vj),Vj.forEach(t),Kj.forEach(t),v6=s(Zp),X3=a(Zp,"SPAN",{});var Zj=l(X3);E6=o(Zj,"Se voc\xEA estiver procurando suporte do time da Hugging Face, acesse"),Zj.forEach(t),Zp.forEach(t),Np=s(u),ya=a(u,"A",{target:!0,href:!0});var pj=l(ya);ua=a(pj,"IMG",{alt:!0,src:!0,style:!0}),T6=o(pj,`&lt;/img> `),pj.forEach(t),kp=a(u,"BR",{}),Cp=s(u),ba=a(u,"H2",{class:!0});var Xp=l(ba);Ga=a(Xp,"A",{id:!0,class:!0,href:!0});var Xj=l(Ga);z3=a(Xj,"SPAN",{});var zj=l(z3);pp(xn.$$.fragment,zj),zj.forEach(t),Xj.forEach(t),_6=s(Xp),H3=a(Xp,"SPAN",{});var Hj=l(H3);m6=o(Hj,"Conte\xFAdo"),Hj.forEach(t),Xp.forEach(t),Ip=s(u),td=a(u,"P",{});var Uj=l(td);p6=o(Uj,"A documenta\xE7\xE3o \xE9 dividida em cinco partes:"),Uj.forEach(t),Bp=s(u),T=a(u,"UL",{});var _=l(T);U3=a(_,"LI",{});var qj=l(U3);rd=a(qj,"P",{});var Dj=l(rd);q3=a(Dj,"STRONG",{});var jj=l(q3);D6=o(jj,"IN\xCDCIO"),jj.forEach(t),A6=o(Dj," cont\xE9m um tour r\xE1pido de instala\xE7\xE3o e instru\xE7\xF5es para te dar um empurr\xE3o inicial com os \u{1F917} Transformers."),Dj.forEach(t),qj.forEach(t),R6=s(_),j3=a(_,"LI",{});var Qj=l(j3);ad=a(Qj,"P",{});var Aj=l(ad);Q3=a(Aj,"STRONG",{});var $j=l(Q3);y6=o($j,"TUTORIAIS"),$j.forEach(t),b6=o(Aj,` s\xE3o perfeitos para come\xE7ar a aprender sobre a nossa biblioteca. 
Essa se\xE7\xE3o ir\xE1 te ajudar a desenvolver habilidades b\xE1sicas necess\xE1rias para usar o \u{1F917} Transformers.`),Aj.forEach(t),Qj.forEach(t),L6=s(_),$3=a(_,"LI",{});var eQ=l($3);ld=a(eQ,"P",{});var Rj=l(ld);e5=a(Rj,"STRONG",{});var tQ=l(e5);S6=o(tQ,"GUIAS PR\xC1TICOS"),tQ.forEach(t),w6=o(Rj,` ir\xE3o te mostrar como alcan\xE7ar um certo objetivo, como o fine-tuning de um modelo pr\xE9-treinado para modelamento de idioma, ou como criar um cabe\xE7alho personalizado para um modelo.`),Rj.forEach(t),eQ.forEach(t),M6=s(_),t5=a(_,"LI",{});var rQ=l(t5);nd=a(rQ,"P",{});var yj=l(nd);r5=a(yj,"STRONG",{});var aQ=l(r5);P6=o(aQ,"GUIAS CONCEITUAIS"),aQ.forEach(t),G6=o(yj,` te dar\xE3o mais discuss\xF5es e explica\xE7\xF5es dos conceitos fundamentais e id\xE9ias por tr\xE1s dos modelos, tarefas e da filosofia de design por tr\xE1s do \u{1F917} Transformers.`),yj.forEach(t),rQ.forEach(t),N6=s(_),a5=a(_,"LI",{});var lQ=l(a5);od=a(lQ,"P",{});var bj=l(od);l5=a(bj,"STRONG",{});var nQ=l(l5);k6=o(nQ,"API"),nQ.forEach(t),C6=o(bj," descreve o funcionamento de cada classe e fun\xE7\xE3o, agrupada em:"),bj.forEach(t),lQ.forEach(t),I6=s(_),n5=a(_,"LI",{});var oQ=l(n5);id=a(oQ,"P",{});var Lj=l(id);o5=a(Lj,"STRONG",{});var iQ=l(o5);B6=o(iQ,"CLASSES PRINCIPAIS"),iQ.forEach(t),x6=o(Lj," para as classes que exp\xF5e as APIs importantes da biblioteca."),Lj.forEach(t),oQ.forEach(t),O6=s(_),i5=a(_,"LI",{});var dQ=l(i5);dd=a(dQ,"P",{});var Sj=l(dd);d5=a(Sj,"STRONG",{});var sQ=l(d5);F6=o(sQ,"MODELOS"),sQ.forEach(t),W6=o(Sj," para as classes e fun\xE7\xF5es relacionadas \xE0 cada modelo implementado na biblioteca."),Sj.forEach(t),dQ.forEach(t),J6=s(_),s5=a(_,"LI",{});var hQ=l(s5);sd=a(hQ,"P",{});var wj=l(sd);h5=a(wj,"STRONG",{});var cQ=l(h5);Y6=o(cQ,"AUXILIARES INTERNOS"),cQ.forEach(t),K6=o(wj," para as classes e fun\xE7\xF5es usadas internamente."),wj.forEach(t),hQ.forEach(t),_.forEach(t),xp=s(u),hd=a(u,"P",{});var gQ=l(hd);V6=o(gQ,"Atualmente a biblioteca cont\xE9m implementa\xE7\xF5es do 
PyTorch, TensorFlow e JAX, pesos para modelos pr\xE9-treinados e scripts de uso e convers\xE3o de utilidades para os seguintes modelos:"),gQ.forEach(t),Op=s(u),La=a(u,"H3",{class:!0});var zp=l(La);Na=a(zp,"A",{id:!0,class:!0,href:!0});var fQ=l(Na);c5=a(fQ,"SPAN",{});var uQ=l(c5);pp(On.$$.fragment,uQ),uQ.forEach(t),fQ.forEach(t),Z6=s(zp),g5=a(zp,"SPAN",{});var vQ=l(g5);X6=o(vQ,"Modelos atuais"),vQ.forEach(t),zp.forEach(t),Fp=s(u),h=a(u,"OL",{});var c=l(h);ka=a(c,"LI",{});var bm=l(ka);f5=a(bm,"STRONG",{});var EQ=l(f5);cd=a(EQ,"A",{href:!0});var TQ=l(cd);z6=o(TQ,"ALBERT"),TQ.forEach(t),EQ.forEach(t),H6=o(bm," (from Google Research and the Toyota Technological Institute at Chicago) released with the paper "),Fn=a(bm,"A",{href:!0,rel:!0});var _Q=l(Fn);U6=o(_Q,"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations"),_Q.forEach(t),q6=o(bm,", by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."),bm.forEach(t),j6=s(c),Ca=a(c,"LI",{});var Lm=l(Ca);u5=a(Lm,"STRONG",{});var mQ=l(u5);gd=a(mQ,"A",{href:!0});var pQ=l(gd);Q6=o(pQ,"BART"),pQ.forEach(t),mQ.forEach(t),$6=o(Lm," (from Facebook) released with the paper "),Wn=a(Lm,"A",{href:!0,rel:!0});var DQ=l(Wn);e7=o(DQ,"BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension"),DQ.forEach(t),t7=o(Lm," by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer."),Lm.forEach(t),r7=s(c),Ia=a(c,"LI",{});var Sm=l(Ia);v5=a(Sm,"STRONG",{});var AQ=l(v5);fd=a(AQ,"A",{href:!0});var RQ=l(fd);a7=o(RQ,"BARThez"),RQ.forEach(t),AQ.forEach(t),l7=o(Sm," (from \xC9cole polytechnique) released with the paper "),Jn=a(Sm,"A",{href:!0,rel:!0});var yQ=l(Jn);n7=o(yQ,"BARThez: a Skilled Pretrained French Sequence-to-Sequence Model"),yQ.forEach(t),o7=o(Sm," by Moussa Kamal Eddine, Antoine J.-P. 
Tixier, Michalis Vazirgiannis."),Sm.forEach(t),i7=s(c),Ba=a(c,"LI",{});var wm=l(Ba);E5=a(wm,"STRONG",{});var bQ=l(E5);ud=a(bQ,"A",{href:!0});var LQ=l(ud);d7=o(LQ,"BARTpho"),LQ.forEach(t),bQ.forEach(t),s7=o(wm," (from VinAI Research) released with the paper "),Yn=a(wm,"A",{href:!0,rel:!0});var SQ=l(Yn);h7=o(SQ,"BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese"),SQ.forEach(t),c7=o(wm," by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen."),wm.forEach(t),g7=s(c),xa=a(c,"LI",{});var Mm=l(xa);T5=a(Mm,"STRONG",{});var wQ=l(T5);vd=a(wQ,"A",{href:!0});var MQ=l(vd);f7=o(MQ,"BEiT"),MQ.forEach(t),wQ.forEach(t),u7=o(Mm," (from Microsoft) released with the paper "),Kn=a(Mm,"A",{href:!0,rel:!0});var PQ=l(Kn);v7=o(PQ,"BEiT: BERT Pre-Training of Image Transformers"),PQ.forEach(t),E7=o(Mm," by Hangbo Bao, Li Dong, Furu Wei."),Mm.forEach(t),T7=s(c),Oa=a(c,"LI",{});var Pm=l(Oa);_5=a(Pm,"STRONG",{});var GQ=l(_5);Ed=a(GQ,"A",{href:!0});var NQ=l(Ed);_7=o(NQ,"BERT"),NQ.forEach(t),GQ.forEach(t),m7=o(Pm," (from Google) released with the paper "),Vn=a(Pm,"A",{href:!0,rel:!0});var kQ=l(Vn);p7=o(kQ,"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"),kQ.forEach(t),D7=o(Pm," by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova."),Pm.forEach(t),A7=s(c),Fa=a(c,"LI",{});var Gm=l(Fa);m5=a(Gm,"STRONG",{});var CQ=l(m5);Td=a(CQ,"A",{href:!0});var IQ=l(Td);R7=o(IQ,"BERTweet"),IQ.forEach(t),CQ.forEach(t),y7=o(Gm," (from VinAI Research) released with the paper "),Zn=a(Gm,"A",{href:!0,rel:!0});var BQ=l(Zn);b7=o(BQ,"BERTweet: A pre-trained language model for English Tweets"),BQ.forEach(t),L7=o(Gm," by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen."),Gm.forEach(t),S7=s(c),Wa=a(c,"LI",{});var Nm=l(Wa);p5=a(Nm,"STRONG",{});var xQ=l(p5);_d=a(xQ,"A",{href:!0});var OQ=l(_d);w7=o(OQ,"BERT For Sequence Generation"),OQ.forEach(t),xQ.forEach(t),M7=o(Nm," (from Google) released with the paper "),Xn=a(Nm,"A",{href:!0,rel:!0});var 
FQ=l(Xn);P7=o(FQ,"Leveraging Pre-trained Checkpoints for Sequence Generation Tasks"),FQ.forEach(t),G7=o(Nm," by Sascha Rothe, Shashi Narayan, Aliaksei Severyn."),Nm.forEach(t),N7=s(c),Ja=a(c,"LI",{});var km=l(Ja);D5=a(km,"STRONG",{});var WQ=l(D5);md=a(WQ,"A",{href:!0});var JQ=l(md);k7=o(JQ,"BigBird-RoBERTa"),JQ.forEach(t),WQ.forEach(t),C7=o(km," (from Google Research) released with the paper "),zn=a(km,"A",{href:!0,rel:!0});var YQ=l(zn);I7=o(YQ,"Big Bird: Transformers for Longer Sequences"),YQ.forEach(t),B7=o(km," by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed."),km.forEach(t),x7=s(c),Ya=a(c,"LI",{});var Cm=l(Ya);A5=a(Cm,"STRONG",{});var KQ=l(A5);pd=a(KQ,"A",{href:!0});var VQ=l(pd);O7=o(VQ,"BigBird-Pegasus"),VQ.forEach(t),KQ.forEach(t),F7=o(Cm," (from Google Research) released with the paper "),Hn=a(Cm,"A",{href:!0,rel:!0});var ZQ=l(Hn);W7=o(ZQ,"Big Bird: Transformers for Longer Sequences"),ZQ.forEach(t),J7=o(Cm," by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed."),Cm.forEach(t),Y7=s(c),Ka=a(c,"LI",{});var Im=l(Ka);R5=a(Im,"STRONG",{});var XQ=l(R5);Dd=a(XQ,"A",{href:!0});var zQ=l(Dd);K7=o(zQ,"Blenderbot"),zQ.forEach(t),XQ.forEach(t),V7=o(Im," (from Facebook) released with the paper "),Un=a(Im,"A",{href:!0,rel:!0});var HQ=l(Un);Z7=o(HQ,"Recipes for building an open-domain chatbot"),HQ.forEach(t),X7=o(Im," by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. 
Smith, Y-Lan Boureau, Jason Weston."),Im.forEach(t),z7=s(c),Va=a(c,"LI",{});var Bm=l(Va);y5=a(Bm,"STRONG",{});var UQ=l(y5);Ad=a(UQ,"A",{href:!0});var qQ=l(Ad);H7=o(qQ,"BlenderbotSmall"),qQ.forEach(t),UQ.forEach(t),U7=o(Bm," (from Facebook) released with the paper "),qn=a(Bm,"A",{href:!0,rel:!0});var jQ=l(qn);q7=o(jQ,"Recipes for building an open-domain chatbot"),jQ.forEach(t),j7=o(Bm," by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston."),Bm.forEach(t),Q7=s(c),Za=a(c,"LI",{});var xm=l(Za);b5=a(xm,"STRONG",{});var QQ=l(b5);Rd=a(QQ,"A",{href:!0});var $Q=l(Rd);$7=o($Q,"BORT"),$Q.forEach(t),QQ.forEach(t),e8=o(xm," (from Alexa) released with the paper "),jn=a(xm,"A",{href:!0,rel:!0});var e$=l(jn);t8=o(e$,"Optimal Subarchitecture Extraction For BERT"),e$.forEach(t),r8=o(xm," by Adrian de Wynter and Daniel J. Perry."),xm.forEach(t),a8=s(c),Xa=a(c,"LI",{});var Om=l(Xa);L5=a(Om,"STRONG",{});var t$=l(L5);yd=a(t$,"A",{href:!0});var r$=l(yd);l8=o(r$,"ByT5"),r$.forEach(t),t$.forEach(t),n8=o(Om," (from Google Research) released with the paper "),Qn=a(Om,"A",{href:!0,rel:!0});var a$=l(Qn);o8=o(a$,"ByT5: Towards a token-free future with pre-trained byte-to-byte models"),a$.forEach(t),i8=o(Om," by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel."),Om.forEach(t),d8=s(c),va=a(c,"LI",{});var zi=l(va);S5=a(zi,"STRONG",{});var l$=l(S5);bd=a(l$,"A",{href:!0});var n$=l(bd);s8=o(n$,"CamemBERT"),n$.forEach(t),l$.forEach(t),h8=o(zi," (from Inria/Facebook/Sorbonne) released with the paper "),$n=a(zi,"A",{href:!0,rel:!0});var o$=l($n);c8=o(o$,"CamemBERT: a Tasty French Language Model"),o$.forEach(t),g8=o(zi," by Louis Martin"),w5=a(zi,"EM",{});var i$=l(w5);f8=o(i$,", Benjamin Muller"),i$.forEach(t),u8=o(zi,", Pedro Javier Ortiz Su\xE1rez*, Yoann Dupont, Laurent Romary, \xC9ric Villemonte de la Clergerie, Djam\xE9 Seddah and 
Beno\xEEt Sagot."),zi.forEach(t),v8=s(c),za=a(c,"LI",{});var Fm=l(za);M5=a(Fm,"STRONG",{});var d$=l(M5);Ld=a(d$,"A",{href:!0});var s$=l(Ld);E8=o(s$,"CANINE"),s$.forEach(t),d$.forEach(t),T8=o(Fm," (from Google Research) released with the paper "),eo=a(Fm,"A",{href:!0,rel:!0});var h$=l(eo);_8=o(h$,"CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation"),h$.forEach(t),m8=o(Fm," by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting."),Fm.forEach(t),p8=s(c),Ha=a(c,"LI",{});var Wm=l(Ha);P5=a(Wm,"STRONG",{});var c$=l(P5);Sd=a(c$,"A",{href:!0});var g$=l(Sd);D8=o(g$,"ConvNeXT"),g$.forEach(t),c$.forEach(t),A8=o(Wm," (from Facebook AI) released with the paper "),to=a(Wm,"A",{href:!0,rel:!0});var f$=l(to);R8=o(f$,"A ConvNet for the 2020s"),f$.forEach(t),y8=o(Wm," by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie."),Wm.forEach(t),b8=s(c),Ua=a(c,"LI",{});var Jm=l(Ua);G5=a(Jm,"STRONG",{});var u$=l(G5);wd=a(u$,"A",{href:!0});var v$=l(wd);L8=o(v$,"CLIP"),v$.forEach(t),u$.forEach(t),S8=o(Jm," (from OpenAI) released with the paper "),ro=a(Jm,"A",{href:!0,rel:!0});var E$=l(ro);w8=o(E$,"Learning Transferable Visual Models From Natural Language Supervision"),E$.forEach(t),M8=o(Jm," by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever."),Jm.forEach(t),P8=s(c),qa=a(c,"LI",{});var Ym=l(qa);N5=a(Ym,"STRONG",{});var T$=l(N5);Md=a(T$,"A",{href:!0});var _$=l(Md);G8=o(_$,"ConvBERT"),_$.forEach(t),T$.forEach(t),N8=o(Ym," (from YituTech) released with the paper "),ao=a(Ym,"A",{href:!0,rel:!0});var m$=l(ao);k8=o(m$,"ConvBERT: Improving BERT with Span-based Dynamic Convolution"),m$.forEach(t),C8=o(Ym," by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan."),Ym.forEach(t),I8=s(c),ja=a(c,"LI",{});var Km=l(ja);k5=a(Km,"STRONG",{});var 
p$=l(k5);Pd=a(p$,"A",{href:!0});var D$=l(Pd);B8=o(D$,"CPM"),D$.forEach(t),p$.forEach(t),x8=o(Km," (from Tsinghua University) released with the paper "),lo=a(Km,"A",{href:!0,rel:!0});var A$=l(lo);O8=o(A$,"CPM: A Large-scale Generative Chinese Pre-trained Language Model"),A$.forEach(t),F8=o(Km," by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun."),Km.forEach(t),W8=s(c),Ea=a(c,"LI",{});var Hi=l(Ea);C5=a(Hi,"STRONG",{});var R$=l(C5);Gd=a(R$,"A",{href:!0});var y$=l(Gd);J8=o(y$,"CTRL"),y$.forEach(t),R$.forEach(t),Y8=o(Hi," (from Salesforce) released with the paper "),no=a(Hi,"A",{href:!0,rel:!0});var b$=l(no);K8=o(b$,"CTRL: A Conditional Transformer Language Model for Controllable Generation"),b$.forEach(t),V8=o(Hi," by Nitish Shirish Keskar"),I5=a(Hi,"EM",{});var L$=l(I5);Z8=o(L$,", Bryan McCann"),L$.forEach(t),X8=o(Hi,", Lav R. 
Varshney, Caiming Xiong and Richard Socher."),Hi.forEach(t),z8=s(c),Qa=a(c,"LI",{});var Vm=l(Qa);B5=a(Vm,"STRONG",{});var S$=l(B5);Nd=a(S$,"A",{href:!0});var w$=l(Nd);H8=o(w$,"Data2Vec"),w$.forEach(t),S$.forEach(t),U8=o(Vm," (from Facebook) released with the paper "),oo=a(Vm,"A",{href:!0,rel:!0});var M$=l(oo);q8=o(M$,"Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language"),M$.forEach(t),j8=o(Vm," by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli."),Vm.forEach(t),Q8=s(c),$a=a(c,"LI",{});var Zm=l($a);x5=a(Zm,"STRONG",{});var P$=l(x5);kd=a(P$,"A",{href:!0});var G$=l(kd);$8=o(G$,"DeBERTa"),G$.forEach(t),P$.forEach(t),e9=o(Zm," (from Microsoft) released with the paper "),io=a(Zm,"A",{href:!0,rel:!0});var N$=l(io);t9=o(N$,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),N$.forEach(t),r9=o(Zm," by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen."),Zm.forEach(t),a9=s(c),el=a(c,"LI",{});var Xm=l(el);O5=a(Xm,"STRONG",{});var k$=l(O5);Cd=a(k$,"A",{href:!0});var C$=l(Cd);l9=o(C$,"DeBERTa-v2"),C$.forEach(t),k$.forEach(t),n9=o(Xm," (from Microsoft) released with the paper "),so=a(Xm,"A",{href:!0,rel:!0});var I$=l(so);o9=o(I$,"DeBERTa: Decoding-enhanced BERT with Disentangled Attention"),I$.forEach(t),i9=o(Xm," by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen."),Xm.forEach(t),d9=s(c),tl=a(c,"LI",{});var zm=l(tl);F5=a(zm,"STRONG",{});var B$=l(F5);Id=a(B$,"A",{href:!0});var x$=l(Id);s9=o(x$,"Decision Transformer"),x$.forEach(t),B$.forEach(t),h9=o(zm," (from Berkeley/Facebook/Google) released with the paper "),ho=a(zm,"A",{href:!0,rel:!0});var O$=l(ho);c9=o(O$,"Decision Transformer: Reinforcement Learning via Sequence Modeling"),O$.forEach(t),g9=o(zm," by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch."),zm.forEach(t),f9=s(c),rl=a(c,"LI",{});var Hm=l(rl);W5=a(Hm,"STRONG",{});var 
F$=l(W5);Bd=a(F$,"A",{href:!0});var W$=l(Bd);u9=o(W$,"DiT"),W$.forEach(t),F$.forEach(t),v9=o(Hm," (from Microsoft Research) released with the paper "),co=a(Hm,"A",{href:!0,rel:!0});var J$=l(co);E9=o(J$,"DiT: Self-supervised Pre-training for Document Image Transformer"),J$.forEach(t),T9=o(Hm," by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei."),Hm.forEach(t),_9=s(c),al=a(c,"LI",{});var Um=l(al);J5=a(Um,"STRONG",{});var Y$=l(J5);xd=a(Y$,"A",{href:!0});var K$=l(xd);m9=o(K$,"DeiT"),K$.forEach(t),Y$.forEach(t),p9=o(Um," (from Facebook) released with the paper "),go=a(Um,"A",{href:!0,rel:!0});var V$=l(go);D9=o(V$,"Training data-efficient image transformers & distillation through attention"),V$.forEach(t),A9=o(Um," by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Herv\xE9 J\xE9gou."),Um.forEach(t),R9=s(c),ll=a(c,"LI",{});var qm=l(ll);Y5=a(qm,"STRONG",{});var Z$=l(Y5);Od=a(Z$,"A",{href:!0});var X$=l(Od);y9=o(X$,"DETR"),X$.forEach(t),Z$.forEach(t),b9=o(qm," (from Facebook) released with the paper "),fo=a(qm,"A",{href:!0,rel:!0});var z$=l(fo);L9=o(z$,"End-to-End Object Detection with Transformers"),z$.forEach(t),S9=o(qm," by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko."),qm.forEach(t),w9=s(c),nl=a(c,"LI",{});var jm=l(nl);K5=a(jm,"STRONG",{});var H$=l(K5);Fd=a(H$,"A",{href:!0});var U$=l(Fd);M9=o(U$,"DialoGPT"),U$.forEach(t),H$.forEach(t),P9=o(jm," (from Microsoft Research) released with the paper "),uo=a(jm,"A",{href:!0,rel:!0});var q$=l(uo);G9=o(q$,"DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation"),q$.forEach(t),N9=o(jm," by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan."),jm.forEach(t),k9=s(c),ut=a(c,"LI",{});var fa=l(ut);V5=a(fa,"STRONG",{});var j$=l(V5);Wd=a(j$,"A",{href:!0});var 
Q$=l(Wd);C9=o(Q$,"DistilBERT"),Q$.forEach(t),j$.forEach(t),I9=o(fa," (from HuggingFace), released together with the paper "),vo=a(fa,"A",{href:!0,rel:!0});var $$=l(vo);B9=o($$,"DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter"),$$.forEach(t),x9=o(fa," by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into "),Eo=a(fa,"A",{href:!0,rel:!0});var eee=l(Eo);O9=o(eee,"DistilGPT2"),eee.forEach(t),F9=o(fa,", RoBERTa into "),To=a(fa,"A",{href:!0,rel:!0});var tee=l(To);W9=o(tee,"DistilRoBERTa"),tee.forEach(t),J9=o(fa,", Multilingual BERT into "),_o=a(fa,"A",{href:!0,rel:!0});var ree=l(_o);Y9=o(ree,"DistilmBERT"),ree.forEach(t),K9=o(fa," and a German version of DistilBERT."),fa.forEach(t),V9=s(c),ol=a(c,"LI",{});var Qm=l(ol);Z5=a(Qm,"STRONG",{});var aee=l(Z5);Jd=a(aee,"A",{href:!0});var lee=l(Jd);Z9=o(lee,"DPR"),lee.forEach(t),aee.forEach(t),X9=o(Qm," (from Facebook) released with the paper "),mo=a(Qm,"A",{href:!0,rel:!0});var nee=l(mo);z9=o(nee,"Dense Passage Retrieval for Open-Domain Question Answering"),nee.forEach(t),H9=o(Qm," by Vladimir Karpukhin, Barlas O\u011Fuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih."),Qm.forEach(t),U9=s(c),il=a(c,"LI",{});var $m=l(il);X5=a($m,"STRONG",{});var oee=l(X5);Yd=a(oee,"A",{href:!0});var iee=l(Yd);q9=o(iee,"DPT"),iee.forEach(t),oee.forEach(t),j9=o($m," (from Intel Labs) released with the paper "),po=a($m,"A",{href:!0,rel:!0});var dee=l(po);Q9=o(dee,"Vision Transformers for Dense Prediction"),dee.forEach(t),$9=o($m," by Ren\xE9 Ranftl, Alexey Bochkovskiy, Vladlen Koltun."),$m.forEach(t),eD=s(c),dl=a(c,"LI",{});var e0=l(dl);z5=a(e0,"STRONG",{});var see=l(z5);Kd=a(see,"A",{href:!0});var hee=l(Kd);tD=o(hee,"EncoderDecoder"),hee.forEach(t),see.forEach(t),rD=o(e0," (from Google Research) released with the paper "),Do=a(e0,"A",{href:!0,rel:!0});var cee=l(Do);aD=o(cee,"Leveraging Pre-trained Checkpoints for Sequence Generation 
Tasks"),cee.forEach(t),lD=o(e0," by Sascha Rothe, Shashi Narayan, Aliaksei Severyn."),e0.forEach(t),nD=s(c),sl=a(c,"LI",{});var t0=l(sl);H5=a(t0,"STRONG",{});var gee=l(H5);Vd=a(gee,"A",{href:!0});var fee=l(Vd);oD=o(fee,"ELECTRA"),fee.forEach(t),gee.forEach(t),iD=o(t0," (from Google Research/Stanford University) released with the paper "),Ao=a(t0,"A",{href:!0,rel:!0});var uee=l(Ao);dD=o(uee,"ELECTRA: Pre-training text encoders as discriminators rather than generators"),uee.forEach(t),sD=o(t0," by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning."),t0.forEach(t),hD=s(c),hl=a(c,"LI",{});var r0=l(hl);U5=a(r0,"STRONG",{});var vee=l(U5);Zd=a(vee,"A",{href:!0});var Eee=l(Zd);cD=o(Eee,"FlauBERT"),Eee.forEach(t),vee.forEach(t),gD=o(r0," (from CNRS) released with the paper "),Ro=a(r0,"A",{href:!0,rel:!0});var Tee=l(Ro);fD=o(Tee,"FlauBERT: Unsupervised Language Model Pre-training for French"),Tee.forEach(t),uD=o(r0," by Hang Le, Lo\xEFc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Beno\xEEt Crabb\xE9, Laurent Besacier, Didier Schwab."),r0.forEach(t),vD=s(c),cl=a(c,"LI",{});var a0=l(cl);q5=a(a0,"STRONG",{});var _ee=l(q5);Xd=a(_ee,"A",{href:!0});var mee=l(Xd);ED=o(mee,"FNet"),mee.forEach(t),_ee.forEach(t),TD=o(a0," (from Google Research) released with the paper "),yo=a(a0,"A",{href:!0,rel:!0});var pee=l(yo);_D=o(pee,"FNet: Mixing Tokens with Fourier Transforms"),pee.forEach(t),mD=o(a0," by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon."),a0.forEach(t),pD=s(c),gl=a(c,"LI",{});var l0=l(gl);j5=a(l0,"STRONG",{});var Dee=l(j5);zd=a(Dee,"A",{href:!0});var Aee=l(zd);DD=o(Aee,"Funnel Transformer"),Aee.forEach(t),Dee.forEach(t),AD=o(l0," (from CMU/Google Brain) released with the paper "),bo=a(l0,"A",{href:!0,rel:!0});var Ree=l(bo);RD=o(Ree,"Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing"),Ree.forEach(t),yD=o(l0," by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. 
Le."),l0.forEach(t),bD=s(c),fl=a(c,"LI",{});var n0=l(fl);Q5=a(n0,"STRONG",{});var yee=l(Q5);Hd=a(yee,"A",{href:!0});var bee=l(Hd);LD=o(bee,"GLPN"),bee.forEach(t),yee.forEach(t),SD=o(n0," (from KAIST) released with the paper "),Lo=a(n0,"A",{href:!0,rel:!0});var Lee=l(Lo);wD=o(Lee,"Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth"),Lee.forEach(t),MD=o(n0," by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim."),n0.forEach(t),PD=s(c),ul=a(c,"LI",{});var o0=l(ul);$5=a(o0,"STRONG",{});var See=l($5);Ud=a(See,"A",{href:!0});var wee=l(Ud);GD=o(wee,"GPT"),wee.forEach(t),See.forEach(t),ND=o(o0," (from OpenAI) released with the paper "),So=a(o0,"A",{href:!0,rel:!0});var Mee=l(So);kD=o(Mee,"Improving Language Understanding by Generative Pre-Training"),Mee.forEach(t),CD=o(o0," by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever."),o0.forEach(t),ID=s(c),Tt=a(c,"LI",{});var wa=l(Tt);e4=a(wa,"STRONG",{});var Pee=l(e4);qd=a(Pee,"A",{href:!0});var Gee=l(qd);BD=o(Gee,"GPT-2"),Gee.forEach(t),Pee.forEach(t),xD=o(wa," (from OpenAI) released with the paper "),wo=a(wa,"A",{href:!0,rel:!0});var Nee=l(wo);OD=o(Nee,"Language Models are Unsupervised Multitask Learners"),Nee.forEach(t),FD=o(wa," by Alec Radford"),t4=a(wa,"EM",{});var kee=l(t4);WD=o(kee,", Jeffrey Wu"),kee.forEach(t),JD=o(wa,", Rewon Child, David Luan, Dario Amodei"),r4=a(wa,"STRONG",{});var Cee=l(r4);YD=o(Cee,"and Ilya Sutskever"),Cee.forEach(t),KD=o(wa,"."),wa.forEach(t),VD=s(c),vl=a(c,"LI",{});var i0=l(vl);a4=a(i0,"STRONG",{});var Iee=l(a4);jd=a(Iee,"A",{href:!0});var Bee=l(jd);ZD=o(Bee,"GPT-J"),Bee.forEach(t),Iee.forEach(t),XD=o(i0," (from EleutherAI) released in the repository "),Mo=a(i0,"A",{href:!0,rel:!0});var xee=l(Mo);zD=o(xee,"kingoflolz/mesh-transformer-jax"),xee.forEach(t),HD=o(i0," by Ben Wang and Aran Komatsuzaki."),i0.forEach(t),UD=s(c),El=a(c,"LI",{});var d0=l(El);l4=a(d0,"STRONG",{});var Oee=l(l4);Qd=a(Oee,"A",{href:!0});var 
Fee=l(Qd);qD=o(Fee,"GPT Neo"),Fee.forEach(t),Oee.forEach(t),jD=o(d0," (from EleutherAI) released in the repository "),Po=a(d0,"A",{href:!0,rel:!0});var Wee=l(Po);QD=o(Wee,"EleutherAI/gpt-neo"),Wee.forEach(t),$D=o(d0," by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy."),d0.forEach(t),eA=s(c),Tl=a(c,"LI",{});var s0=l(Tl);n4=a(s0,"STRONG",{});var Jee=l(n4);$d=a(Jee,"A",{href:!0});var Yee=l($d);tA=o(Yee,"Hubert"),Yee.forEach(t),Jee.forEach(t),rA=o(s0," (from Facebook) released with the paper "),Go=a(s0,"A",{href:!0,rel:!0});var Kee=l(Go);aA=o(Kee,"HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units"),Kee.forEach(t),lA=o(s0," by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed."),s0.forEach(t),nA=s(c),_l=a(c,"LI",{});var h0=l(_l);o4=a(h0,"STRONG",{});var Vee=l(o4);es=a(Vee,"A",{href:!0});var Zee=l(es);oA=o(Zee,"I-BERT"),Zee.forEach(t),Vee.forEach(t),iA=o(h0," (from Berkeley) released with the paper "),No=a(h0,"A",{href:!0,rel:!0});var Xee=l(No);dA=o(Xee,"I-BERT: Integer-only BERT Quantization"),Xee.forEach(t),sA=o(h0," by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. 
Mahoney, Kurt Keutzer."),h0.forEach(t),hA=s(c),ml=a(c,"LI",{});var c0=l(ml);i4=a(c0,"STRONG",{});var zee=l(i4);ts=a(zee,"A",{href:!0});var Hee=l(ts);cA=o(Hee,"ImageGPT"),Hee.forEach(t),zee.forEach(t),gA=o(c0," (from OpenAI) released with the paper "),ko=a(c0,"A",{href:!0,rel:!0});var Uee=l(ko);fA=o(Uee,"Generative Pretraining from Pixels"),Uee.forEach(t),uA=o(c0," by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever."),c0.forEach(t),vA=s(c),pl=a(c,"LI",{});var g0=l(pl);d4=a(g0,"STRONG",{});var qee=l(d4);rs=a(qee,"A",{href:!0});var jee=l(rs);EA=o(jee,"LayoutLM"),jee.forEach(t),qee.forEach(t),TA=o(g0," (from Microsoft Research Asia) released with the paper "),Co=a(g0,"A",{href:!0,rel:!0});var Qee=l(Co);_A=o(Qee,"LayoutLM: Pre-training of Text and Layout for Document Image Understanding"),Qee.forEach(t),mA=o(g0," by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou."),g0.forEach(t),pA=s(c),Dl=a(c,"LI",{});var f0=l(Dl);s4=a(f0,"STRONG",{});var $ee=l(s4);as=a($ee,"A",{href:!0});var ete=l(as);DA=o(ete,"LayoutLMv2"),ete.forEach(t),$ee.forEach(t),AA=o(f0," (from Microsoft Research Asia) released with the paper "),Io=a(f0,"A",{href:!0,rel:!0});var tte=l(Io);RA=o(tte,"LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding"),tte.forEach(t),yA=o(f0," by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou."),f0.forEach(t),bA=s(c),Al=a(c,"LI",{});var u0=l(Al);h4=a(u0,"STRONG",{});var rte=l(h4);ls=a(rte,"A",{href:!0});var ate=l(ls);LA=o(ate,"LayoutXLM"),ate.forEach(t),rte.forEach(t),SA=o(u0," (from Microsoft Research Asia) released with the paper "),Bo=a(u0,"A",{href:!0,rel:!0});var lte=l(Bo);wA=o(lte,"LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding"),lte.forEach(t),MA=o(u0," by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu 
Wei."),u0.forEach(t),PA=s(c),Rl=a(c,"LI",{});var v0=l(Rl);c4=a(v0,"STRONG",{});var nte=l(c4);ns=a(nte,"A",{href:!0});var ote=l(ns);GA=o(ote,"LED"),ote.forEach(t),nte.forEach(t),NA=o(v0," (from AllenAI) released with the paper "),xo=a(v0,"A",{href:!0,rel:!0});var ite=l(xo);kA=o(ite,"Longformer: The Long-Document Transformer"),ite.forEach(t),CA=o(v0," by Iz Beltagy, Matthew E. Peters, Arman Cohan."),v0.forEach(t),IA=s(c),yl=a(c,"LI",{});var E0=l(yl);g4=a(E0,"STRONG",{});var dte=l(g4);os=a(dte,"A",{href:!0});var ste=l(os);BA=o(ste,"Longformer"),ste.forEach(t),dte.forEach(t),xA=o(E0," (from AllenAI) released with the paper "),Oo=a(E0,"A",{href:!0,rel:!0});var hte=l(Oo);OA=o(hte,"Longformer: The Long-Document Transformer"),hte.forEach(t),FA=o(E0," by Iz Beltagy, Matthew E. Peters, Arman Cohan."),E0.forEach(t),WA=s(c),bl=a(c,"LI",{});var T0=l(bl);f4=a(T0,"STRONG",{});var cte=l(f4);is=a(cte,"A",{href:!0});var gte=l(is);JA=o(gte,"LUKE"),gte.forEach(t),cte.forEach(t),YA=o(T0," (from Studio Ousia) released with the paper "),Fo=a(T0,"A",{href:!0,rel:!0});var fte=l(Fo);KA=o(fte,"LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention"),fte.forEach(t),VA=o(T0," by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto."),T0.forEach(t),ZA=s(c),Ll=a(c,"LI",{});var _0=l(Ll);u4=a(_0,"STRONG",{});var ute=l(u4);ds=a(ute,"A",{href:!0});var vte=l(ds);XA=o(vte,"mLUKE"),vte.forEach(t),ute.forEach(t),zA=o(_0," (from Studio Ousia) released with the paper "),Wo=a(_0,"A",{href:!0,rel:!0});var Ete=l(Wo);HA=o(Ete,"mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models"),Ete.forEach(t),UA=o(_0," by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka."),_0.forEach(t),qA=s(c),Sl=a(c,"LI",{});var m0=l(Sl);v4=a(m0,"STRONG",{});var Tte=l(v4);ss=a(Tte,"A",{href:!0});var _te=l(ss);jA=o(_te,"LXMERT"),_te.forEach(t),Tte.forEach(t),QA=o(m0," (from UNC Chapel Hill) released with the paper "),Jo=a(m0,"A",{href:!0,rel:!0});var 
mte=l(Jo);$A=o(mte,"LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering"),mte.forEach(t),eR=o(m0," by Hao Tan and Mohit Bansal."),m0.forEach(t),tR=s(c),wl=a(c,"LI",{});var p0=l(wl);E4=a(p0,"STRONG",{});var pte=l(E4);hs=a(pte,"A",{href:!0});var Dte=l(hs);rR=o(Dte,"M2M100"),Dte.forEach(t),pte.forEach(t),aR=o(p0," (from Facebook) released with the paper "),Yo=a(p0,"A",{href:!0,rel:!0});var Ate=l(Yo);lR=o(Ate,"Beyond English-Centric Multilingual Machine Translation"),Ate.forEach(t),nR=o(p0," by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin."),p0.forEach(t),oR=s(c),Ta=a(c,"LI",{});var Ui=l(Ta);T4=a(Ui,"STRONG",{});var Rte=l(T4);cs=a(Rte,"A",{href:!0});var yte=l(cs);iR=o(yte,"MarianMT"),yte.forEach(t),Rte.forEach(t),dR=o(Ui," Machine translation models trained using "),Ko=a(Ui,"A",{href:!0,rel:!0});var bte=l(Ko);sR=o(bte,"OPUS"),bte.forEach(t),hR=o(Ui," data by J\xF6rg Tiedemann. The "),Vo=a(Ui,"A",{href:!0,rel:!0});var Lte=l(Vo);cR=o(Lte,"Marian Framework"),Lte.forEach(t),gR=o(Ui," is being developed by the Microsoft Translator Team."),Ui.forEach(t),fR=s(c),Ml=a(c,"LI",{});var D0=l(Ml);_4=a(D0,"STRONG",{});var Ste=l(_4);gs=a(Ste,"A",{href:!0});var wte=l(gs);uR=o(wte,"MaskFormer"),wte.forEach(t),Ste.forEach(t),vR=o(D0," (from Meta and UIUC) released with the paper "),Zo=a(D0,"A",{href:!0,rel:!0});var Mte=l(Zo);ER=o(Mte,"Per-Pixel Classification is Not All You Need for Semantic Segmentation"),Mte.forEach(t),TR=o(D0," by Bowen Cheng, Alexander G. 
Schwing, Alexander Kirillov."),D0.forEach(t),_R=s(c),Pl=a(c,"LI",{});var A0=l(Pl);m4=a(A0,"STRONG",{});var Pte=l(m4);fs=a(Pte,"A",{href:!0});var Gte=l(fs);mR=o(Gte,"MBart"),Gte.forEach(t),Pte.forEach(t),pR=o(A0," (from Facebook) released with the paper "),Xo=a(A0,"A",{href:!0,rel:!0});var Nte=l(Xo);DR=o(Nte,"Multilingual Denoising Pre-training for Neural Machine Translation"),Nte.forEach(t),AR=o(A0," by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer."),A0.forEach(t),RR=s(c),Gl=a(c,"LI",{});var R0=l(Gl);p4=a(R0,"STRONG",{});var kte=l(p4);us=a(kte,"A",{href:!0});var Cte=l(us);yR=o(Cte,"MBart-50"),Cte.forEach(t),kte.forEach(t),bR=o(R0," (from Facebook) released with the paper "),zo=a(R0,"A",{href:!0,rel:!0});var Ite=l(zo);LR=o(Ite,"Multilingual Translation with Extensible Multilingual Pretraining and Finetuning"),Ite.forEach(t),SR=o(R0," by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan."),R0.forEach(t),wR=s(c),Nl=a(c,"LI",{});var y0=l(Nl);D4=a(y0,"STRONG",{});var Bte=l(D4);vs=a(Bte,"A",{href:!0});var xte=l(vs);MR=o(xte,"Megatron-BERT"),xte.forEach(t),Bte.forEach(t),PR=o(y0," (from NVIDIA) released with the paper "),Ho=a(y0,"A",{href:!0,rel:!0});var Ote=l(Ho);GR=o(Ote,"Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism"),Ote.forEach(t),NR=o(y0," by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro."),y0.forEach(t),kR=s(c),kl=a(c,"LI",{});var b0=l(kl);A4=a(b0,"STRONG",{});var Fte=l(A4);Es=a(Fte,"A",{href:!0});var Wte=l(Es);CR=o(Wte,"Megatron-GPT2"),Wte.forEach(t),Fte.forEach(t),IR=o(b0," (from NVIDIA) released with the paper "),Uo=a(b0,"A",{href:!0,rel:!0});var Jte=l(Uo);BR=o(Jte,"Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism"),Jte.forEach(t),xR=o(b0," by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper 
and Bryan Catanzaro."),b0.forEach(t),OR=s(c),Cl=a(c,"LI",{});var L0=l(Cl);R4=a(L0,"STRONG",{});var Yte=l(R4);Ts=a(Yte,"A",{href:!0});var Kte=l(Ts);FR=o(Kte,"MPNet"),Kte.forEach(t),Yte.forEach(t),WR=o(L0," (from Microsoft Research) released with the paper "),qo=a(L0,"A",{href:!0,rel:!0});var Vte=l(qo);JR=o(Vte,"MPNet: Masked and Permuted Pre-training for Language Understanding"),Vte.forEach(t),YR=o(L0," by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu."),L0.forEach(t),KR=s(c),Il=a(c,"LI",{});var S0=l(Il);y4=a(S0,"STRONG",{});var Zte=l(y4);_s=a(Zte,"A",{href:!0});var Xte=l(_s);VR=o(Xte,"MT5"),Xte.forEach(t),Zte.forEach(t),ZR=o(S0," (from Google AI) released with the paper "),jo=a(S0,"A",{href:!0,rel:!0});var zte=l(jo);XR=o(zte,"mT5: A massively multilingual pre-trained text-to-text transformer"),zte.forEach(t),zR=o(S0," by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel."),S0.forEach(t),HR=s(c),Bl=a(c,"LI",{});var w0=l(Bl);b4=a(w0,"STRONG",{});var Hte=l(b4);ms=a(Hte,"A",{href:!0});var Ute=l(ms);UR=o(Ute,"Nystr\xF6mformer"),Ute.forEach(t),Hte.forEach(t),qR=o(w0," (from the University of Wisconsin - Madison) released with the paper "),Qo=a(w0,"A",{href:!0,rel:!0});var qte=l(Qo);jR=o(qte,"Nystr\xF6mformer: A Nystr\xF6m-Based Algorithm for Approximating Self-Attention"),qte.forEach(t),QR=o(w0," by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh."),w0.forEach(t),$R=s(c),xl=a(c,"LI",{});var M0=l(xl);L4=a(M0,"STRONG",{});var jte=l(L4);ps=a(jte,"A",{href:!0});var Qte=l(ps);ey=o(Qte,"Pegasus"),Qte.forEach(t),jte.forEach(t),ty=o(M0," (from Google) released with the paper "),$o=a(M0,"A",{href:!0,rel:!0});var $te=l($o);ry=o($te,"PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization"),$te.forEach(t),ay=o(M0," by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. 
Liu."),M0.forEach(t),ly=s(c),Ol=a(c,"LI",{});var P0=l(Ol);S4=a(P0,"STRONG",{});var ere=l(S4);Ds=a(ere,"A",{href:!0});var tre=l(Ds);ny=o(tre,"Perceiver IO"),tre.forEach(t),ere.forEach(t),oy=o(P0," (from Deepmind) released with the paper "),ei=a(P0,"A",{href:!0,rel:!0});var rre=l(ei);iy=o(rre,"Perceiver IO: A General Architecture for Structured Inputs & Outputs"),rre.forEach(t),dy=o(P0," by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier H\xE9naff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, Jo\xE3o Carreira."),P0.forEach(t),sy=s(c),Fl=a(c,"LI",{});var G0=l(Fl);w4=a(G0,"STRONG",{});var are=l(w4);As=a(are,"A",{href:!0});var lre=l(As);hy=o(lre,"PhoBERT"),lre.forEach(t),are.forEach(t),cy=o(G0," (from VinAI Research) released with the paper "),ti=a(G0,"A",{href:!0,rel:!0});var nre=l(ti);gy=o(nre,"PhoBERT: Pre-trained language models for Vietnamese"),nre.forEach(t),fy=o(G0," by Dat Quoc Nguyen and Anh Tuan Nguyen."),G0.forEach(t),uy=s(c),Wl=a(c,"LI",{});var N0=l(Wl);M4=a(N0,"STRONG",{});var ore=l(M4);Rs=a(ore,"A",{href:!0});var ire=l(Rs);vy=o(ire,"PLBart"),ire.forEach(t),ore.forEach(t),Ey=o(N0," (from UCLA NLP) released with the paper "),ri=a(N0,"A",{href:!0,rel:!0});var dre=l(ri);Ty=o(dre,"Unified Pre-training for Program Understanding and Generation"),dre.forEach(t),_y=o(N0," by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang."),N0.forEach(t),my=s(c),Jl=a(c,"LI",{});var k0=l(Jl);P4=a(k0,"STRONG",{});var sre=l(P4);ys=a(sre,"A",{href:!0});var hre=l(ys);py=o(hre,"PoolFormer"),hre.forEach(t),sre.forEach(t),Dy=o(k0," (from Sea AI Labs) released with the paper "),ai=a(k0,"A",{href:!0,rel:!0});var cre=l(ai);Ay=o(cre,"MetaFormer is Actually What You Need for Vision"),cre.forEach(t),Ry=o(k0," by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, 
Shuicheng."),k0.forEach(t),yy=s(c),Yl=a(c,"LI",{});var C0=l(Yl);G4=a(C0,"STRONG",{});var gre=l(G4);bs=a(gre,"A",{href:!0});var fre=l(bs);by=o(fre,"ProphetNet"),fre.forEach(t),gre.forEach(t),Ly=o(C0," (from Microsoft Research) released with the paper "),li=a(C0,"A",{href:!0,rel:!0});var ure=l(li);Sy=o(ure,"ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training"),ure.forEach(t),wy=o(C0," by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou."),C0.forEach(t),My=s(c),Kl=a(c,"LI",{});var I0=l(Kl);N4=a(I0,"STRONG",{});var vre=l(N4);Ls=a(vre,"A",{href:!0});var Ere=l(Ls);Py=o(Ere,"QDQBert"),Ere.forEach(t),vre.forEach(t),Gy=o(I0," (from NVIDIA) released with the paper "),ni=a(I0,"A",{href:!0,rel:!0});var Tre=l(ni);Ny=o(Tre,"Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation"),Tre.forEach(t),ky=o(I0," by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius."),I0.forEach(t),Cy=s(c),Vl=a(c,"LI",{});var B0=l(Vl);k4=a(B0,"STRONG",{});var _re=l(k4);Ss=a(_re,"A",{href:!0});var mre=l(Ss);Iy=o(mre,"REALM"),mre.forEach(t),_re.forEach(t),By=o(B0," (from Google Research) released with the paper "),oi=a(B0,"A",{href:!0,rel:!0});var pre=l(oi);xy=o(pre,"REALM: Retrieval-Augmented Language Model Pre-Training"),pre.forEach(t),Oy=o(B0," by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang."),B0.forEach(t),Fy=s(c),Zl=a(c,"LI",{});var x0=l(Zl);C4=a(x0,"STRONG",{});var Dre=l(C4);ws=a(Dre,"A",{href:!0});var Are=l(ws);Wy=o(Are,"Reformer"),Are.forEach(t),Dre.forEach(t),Jy=o(x0," (from Google Research) released with the paper "),ii=a(x0,"A",{href:!0,rel:!0});var Rre=l(ii);Yy=o(Rre,"Reformer: The Efficient Transformer"),Rre.forEach(t),Ky=o(x0," by Nikita Kitaev, \u0141ukasz Kaiser, Anselm Levskaya."),x0.forEach(t),Vy=s(c),Xl=a(c,"LI",{});var O0=l(Xl);I4=a(O0,"STRONG",{});var yre=l(I4);Ms=a(yre,"A",{href:!0});var 
bre=l(Ms);Zy=o(bre,"RemBERT"),bre.forEach(t),yre.forEach(t),Xy=o(O0," (from Google Research) released with the paper "),di=a(O0,"A",{href:!0,rel:!0});var Lre=l(di);zy=o(Lre,"Rethinking embedding coupling in pre-trained language models"),Lre.forEach(t),Hy=o(O0," by Hyung Won Chung, Thibault F\xE9vry, Henry Tsai, M. Johnson, Sebastian Ruder."),O0.forEach(t),Uy=s(c),zl=a(c,"LI",{});var F0=l(zl);B4=a(F0,"STRONG",{});var Sre=l(B4);Ps=a(Sre,"A",{href:!0});var wre=l(Ps);qy=o(wre,"RegNet"),wre.forEach(t),Sre.forEach(t),jy=o(F0," (from META Platforms) released with the paper "),si=a(F0,"A",{href:!0,rel:!0});var Mre=l(si);Qy=o(Mre,"Designing Network Design Space"),Mre.forEach(t),$y=o(F0," by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Doll\xE1r."),F0.forEach(t),eb=s(c),Hl=a(c,"LI",{});var W0=l(Hl);x4=a(W0,"STRONG",{});var Pre=l(x4);Gs=a(Pre,"A",{href:!0});var Gre=l(Gs);tb=o(Gre,"ResNet"),Gre.forEach(t),Pre.forEach(t),rb=o(W0," (from Microsoft Research) released with the paper "),hi=a(W0,"A",{href:!0,rel:!0});var Nre=l(hi);ab=o(Nre,"Deep Residual Learning for Image Recognition"),Nre.forEach(t),lb=o(W0," by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun."),W0.forEach(t),nb=s(c),Ul=a(c,"LI",{});var J0=l(Ul);O4=a(J0,"STRONG",{});var kre=l(O4);Ns=a(kre,"A",{href:!0});var Cre=l(Ns);ob=o(Cre,"RoBERTa"),Cre.forEach(t),kre.forEach(t),ib=o(J0," (from Facebook), released together with the paper "),ci=a(J0,"A",{href:!0,rel:!0});var Ire=l(ci);db=o(Ire,"RoBERTa: A Robustly Optimized BERT Pretraining Approach"),Ire.forEach(t),sb=o(J0," by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov."),J0.forEach(t),hb=s(c),ql=a(c,"LI",{});var Y0=l(ql);F4=a(Y0,"STRONG",{});var Bre=l(F4);ks=a(Bre,"A",{href:!0});var xre=l(ks);cb=o(xre,"RoFormer"),xre.forEach(t),Bre.forEach(t),gb=o(Y0," (from ZhuiyiTechnology), released together with the paper "),gi=a(Y0,"A",{href:!0,rel:!0});var 
Ore=l(gi);fb=o(Ore,"RoFormer: Enhanced Transformer with Rotary Position Embedding"),Ore.forEach(t),ub=o(Y0," by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu."),Y0.forEach(t),vb=s(c),jl=a(c,"LI",{});var K0=l(jl);W4=a(K0,"STRONG",{});var Fre=l(W4);Cs=a(Fre,"A",{href:!0});var Wre=l(Cs);Eb=o(Wre,"SegFormer"),Wre.forEach(t),Fre.forEach(t),Tb=o(K0," (from NVIDIA) released with the paper "),fi=a(K0,"A",{href:!0,rel:!0});var Jre=l(fi);_b=o(Jre,"SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers"),Jre.forEach(t),mb=o(K0," by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo."),K0.forEach(t),pb=s(c),Ql=a(c,"LI",{});var V0=l(Ql);J4=a(V0,"STRONG",{});var Yre=l(J4);Is=a(Yre,"A",{href:!0});var Kre=l(Is);Db=o(Kre,"SEW"),Kre.forEach(t),Yre.forEach(t),Ab=o(V0," (from ASAPP) released with the paper "),ui=a(V0,"A",{href:!0,rel:!0});var Vre=l(ui);Rb=o(Vre,"Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),Vre.forEach(t),yb=o(V0," by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi."),V0.forEach(t),bb=s(c),$l=a(c,"LI",{});var Z0=l($l);Y4=a(Z0,"STRONG",{});var Zre=l(Y4);Bs=a(Zre,"A",{href:!0});var Xre=l(Bs);Lb=o(Xre,"SEW-D"),Xre.forEach(t),Zre.forEach(t),Sb=o(Z0," (from ASAPP) released with the paper "),vi=a(Z0,"A",{href:!0,rel:!0});var zre=l(vi);wb=o(zre,"Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition"),zre.forEach(t),Mb=o(Z0," by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. 
Weinberger, Yoav Artzi."),Z0.forEach(t),Pb=s(c),en=a(c,"LI",{});var X0=l(en);K4=a(X0,"STRONG",{});var Hre=l(K4);xs=a(Hre,"A",{href:!0});var Ure=l(xs);Gb=o(Ure,"SpeechToTextTransformer"),Ure.forEach(t),Hre.forEach(t),Nb=o(X0," (from Facebook), released together with the paper "),Ei=a(X0,"A",{href:!0,rel:!0});var qre=l(Ei);kb=o(qre,"fairseq S2T: Fast Speech-to-Text Modeling with fairseq"),qre.forEach(t),Cb=o(X0," by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino."),X0.forEach(t),Ib=s(c),tn=a(c,"LI",{});var z0=l(tn);V4=a(z0,"STRONG",{});var jre=l(V4);Os=a(jre,"A",{href:!0});var Qre=l(Os);Bb=o(Qre,"SpeechToTextTransformer2"),Qre.forEach(t),jre.forEach(t),xb=o(z0," (from Facebook), released together with the paper "),Ti=a(z0,"A",{href:!0,rel:!0});var $re=l(Ti);Ob=o($re,"Large-Scale Self- and Semi-Supervised Learning for Speech Translation"),$re.forEach(t),Fb=o(z0," by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau."),z0.forEach(t),Wb=s(c),rn=a(c,"LI",{});var H0=l(rn);Z4=a(H0,"STRONG",{});var eae=l(Z4);Fs=a(eae,"A",{href:!0});var tae=l(Fs);Jb=o(tae,"Splinter"),tae.forEach(t),eae.forEach(t),Yb=o(H0," (from Tel Aviv University), released together with the paper "),_i=a(H0,"A",{href:!0,rel:!0});var rae=l(_i);Kb=o(rae,"Few-Shot Question Answering by Pretraining Span Selection"),rae.forEach(t),Vb=o(H0," by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy."),H0.forEach(t),Zb=s(c),an=a(c,"LI",{});var U0=l(an);X4=a(U0,"STRONG",{});var aae=l(X4);Ws=a(aae,"A",{href:!0});var lae=l(Ws);Xb=o(lae,"SqueezeBert"),lae.forEach(t),aae.forEach(t),zb=o(U0," (from Berkeley) released with the paper "),mi=a(U0,"A",{href:!0,rel:!0});var nae=l(mi);Hb=o(nae,"SqueezeBERT: What can computer vision teach NLP about efficient neural networks?"),nae.forEach(t),Ub=o(U0," by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. 
Keutzer."),U0.forEach(t),qb=s(c),ln=a(c,"LI",{});var q0=l(ln);z4=a(q0,"STRONG",{});var oae=l(z4);Js=a(oae,"A",{href:!0});var iae=l(Js);jb=o(iae,"Swin Transformer"),iae.forEach(t),oae.forEach(t),Qb=o(q0," (from Microsoft) released with the paper "),pi=a(q0,"A",{href:!0,rel:!0});var dae=l(pi);$b=o(dae,"Swin Transformer: Hierarchical Vision Transformer using Shifted Windows"),dae.forEach(t),eL=o(q0," by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo."),q0.forEach(t),tL=s(c),nn=a(c,"LI",{});var j0=l(nn);H4=a(j0,"STRONG",{});var sae=l(H4);Ys=a(sae,"A",{href:!0});var hae=l(Ys);rL=o(hae,"T5"),hae.forEach(t),sae.forEach(t),aL=o(j0," (from Google AI) released with the paper "),Di=a(j0,"A",{href:!0,rel:!0});var cae=l(Di);lL=o(cae,"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"),cae.forEach(t),nL=o(j0," by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu."),j0.forEach(t),oL=s(c),on=a(c,"LI",{});var Q0=l(on);U4=a(Q0,"STRONG",{});var gae=l(U4);Ks=a(gae,"A",{href:!0});var fae=l(Ks);iL=o(fae,"T5v1.1"),fae.forEach(t),gae.forEach(t),dL=o(Q0," (from Google AI) released in the repository "),Ai=a(Q0,"A",{href:!0,rel:!0});var uae=l(Ai);sL=o(uae,"google-research/text-to-text-transfer-transformer"),uae.forEach(t),hL=o(Q0," by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. 
Liu."),Q0.forEach(t),cL=s(c),dn=a(c,"LI",{});var $0=l(dn);q4=a($0,"STRONG",{});var vae=l(q4);Vs=a(vae,"A",{href:!0});var Eae=l(Vs);gL=o(Eae,"TAPAS"),Eae.forEach(t),vae.forEach(t),fL=o($0," (from Google AI) released with the paper "),Ri=a($0,"A",{href:!0,rel:!0});var Tae=l(Ri);uL=o(Tae,"TAPAS: Weakly Supervised Table Parsing via Pre-training"),Tae.forEach(t),vL=o($0," by Jonathan Herzig, Pawe\u0142 Krzysztof Nowak, Thomas M\xFCller, Francesco Piccinno and Julian Martin Eisenschlos."),$0.forEach(t),EL=s(c),sn=a(c,"LI",{});var ep=l(sn);j4=a(ep,"STRONG",{});var _ae=l(j4);Zs=a(_ae,"A",{href:!0});var mae=l(Zs);TL=o(mae,"TAPEX"),mae.forEach(t),_ae.forEach(t),_L=o(ep," (from Microsoft Research) released with the paper "),yi=a(ep,"A",{href:!0,rel:!0});var pae=l(yi);mL=o(pae,"TAPEX: Table Pre-training via Learning a Neural SQL Executor"),pae.forEach(t),pL=o(ep," by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou."),ep.forEach(t),DL=s(c),_a=a(c,"LI",{});var qi=l(_a);Q4=a(qi,"STRONG",{});var Dae=l(Q4);Xs=a(Dae,"A",{href:!0});var Aae=l(Xs);AL=o(Aae,"Transformer-XL"),Aae.forEach(t),Dae.forEach(t),RL=o(qi," (from Google/CMU) released with the paper "),bi=a(qi,"A",{href:!0,rel:!0});var Rae=l(bi);yL=o(Rae,"Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"),Rae.forEach(t),bL=o(qi," by Zihang Dai"),$4=a(qi,"EM",{});var yae=l($4);LL=o(yae,", Zhilin Yang"),yae.forEach(t),SL=o(qi,", Yiming Yang, Jaime Carbonell, Quoc V. 
Le, Ruslan Salakhutdinov."),qi.forEach(t),wL=s(c),hn=a(c,"LI",{});var tp=l(hn);em=a(tp,"STRONG",{});var bae=l(em);zs=a(bae,"A",{href:!0});var Lae=l(zs);ML=o(Lae,"TrOCR"),Lae.forEach(t),bae.forEach(t),PL=o(tp," (from Microsoft), released together with the paper "),Li=a(tp,"A",{href:!0,rel:!0});var Sae=l(Li);GL=o(Sae,"TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models"),Sae.forEach(t),NL=o(tp," by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei."),tp.forEach(t),kL=s(c),cn=a(c,"LI",{});var rp=l(cn);tm=a(rp,"STRONG",{});var wae=l(tm);Hs=a(wae,"A",{href:!0});var Mae=l(Hs);CL=o(Mae,"UniSpeech"),Mae.forEach(t),wae.forEach(t),IL=o(rp," (from Microsoft Research) released with the paper "),Si=a(rp,"A",{href:!0,rel:!0});var Pae=l(Si);BL=o(Pae,"UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data"),Pae.forEach(t),xL=o(rp," by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang."),rp.forEach(t),OL=s(c),gn=a(c,"LI",{});var ap=l(gn);rm=a(ap,"STRONG",{});var Gae=l(rm);Us=a(Gae,"A",{href:!0});var Nae=l(Us);FL=o(Nae,"UniSpeechSat"),Nae.forEach(t),Gae.forEach(t),WL=o(ap," (from Microsoft Research) released with the paper "),wi=a(ap,"A",{href:!0,rel:!0});var kae=l(wi);JL=o(kae,"UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING"),kae.forEach(t),YL=o(ap," by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu."),ap.forEach(t),KL=s(c),fn=a(c,"LI",{});var lp=l(fn);am=a(lp,"STRONG",{});var Cae=l(am);qs=a(Cae,"A",{href:!0});var Iae=l(qs);VL=o(Iae,"VAN"),Iae.forEach(t),Cae.forEach(t),ZL=o(lp," (from Tsinghua University and Nankai University) released with the paper "),Mi=a(lp,"A",{href:!0,rel:!0});var Bae=l(Mi);XL=o(Bae,"Visual Attention Network"),Bae.forEach(t),zL=o(lp," by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, 
Shi-Min Hu."),lp.forEach(t),HL=s(c),un=a(c,"LI",{});var np=l(un);lm=a(np,"STRONG",{});var xae=l(lm);js=a(xae,"A",{href:!0});var Oae=l(js);UL=o(Oae,"ViLT"),Oae.forEach(t),xae.forEach(t),qL=o(np," (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper "),Pi=a(np,"A",{href:!0,rel:!0});var Fae=l(Pi);jL=o(Fae,"ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision"),Fae.forEach(t),QL=o(np," by Wonjae Kim, Bokyung Son, Ildoo Kim."),np.forEach(t),$L=s(c),vn=a(c,"LI",{});var op=l(vn);nm=a(op,"STRONG",{});var Wae=l(nm);Qs=a(Wae,"A",{href:!0});var Jae=l(Qs);eS=o(Jae,"Vision Transformer (ViT)"),Jae.forEach(t),Wae.forEach(t),tS=o(op," (from Google AI) released with the paper "),Gi=a(op,"A",{href:!0,rel:!0});var Yae=l(Gi);rS=o(Yae,"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale"),Yae.forEach(t),aS=o(op," by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby."),op.forEach(t),lS=s(c),En=a(c,"LI",{});var ip=l(En);om=a(ip,"STRONG",{});var Kae=l(om);$s=a(Kae,"A",{href:!0});var Vae=l($s);nS=o(Vae,"ViTMAE"),Vae.forEach(t),Kae.forEach(t),oS=o(ip," (from Meta AI) released with the paper "),Ni=a(ip,"A",{href:!0,rel:!0});var Zae=l(Ni);iS=o(Zae,"Masked Autoencoders Are Scalable Vision Learners"),Zae.forEach(t),dS=o(ip," by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Doll\xE1r, Ross Girshick."),ip.forEach(t),sS=s(c),Tn=a(c,"LI",{});var dp=l(Tn);im=a(dp,"STRONG",{});var Xae=l(im);eh=a(Xae,"A",{href:!0});var zae=l(eh);hS=o(zae,"VisualBERT"),zae.forEach(t),Xae.forEach(t),cS=o(dp," (from UCLA NLP) released with the paper "),ki=a(dp,"A",{href:!0,rel:!0});var Hae=l(ki);gS=o(Hae,"VisualBERT: A Simple and Performant Baseline for Vision and Language"),Hae.forEach(t),fS=o(dp," by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei 
Chang."),dp.forEach(t),uS=s(c),_n=a(c,"LI",{});var sp=l(_n);dm=a(sp,"STRONG",{});var Uae=l(dm);th=a(Uae,"A",{href:!0});var qae=l(th);vS=o(qae,"WavLM"),qae.forEach(t),Uae.forEach(t),ES=o(sp," (from Microsoft Research) released with the paper "),Ci=a(sp,"A",{href:!0,rel:!0});var jae=l(Ci);TS=o(jae,"WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing"),jae.forEach(t),_S=o(sp," by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei."),sp.forEach(t),mS=s(c),mn=a(c,"LI",{});var hp=l(mn);sm=a(hp,"STRONG",{});var Qae=l(sm);rh=a(Qae,"A",{href:!0});var $ae=l(rh);pS=o($ae,"Wav2Vec2"),$ae.forEach(t),Qae.forEach(t),DS=o(hp," (from Facebook AI) released with the paper "),Ii=a(hp,"A",{href:!0,rel:!0});var ele=l(Ii);AS=o(ele,"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations"),ele.forEach(t),RS=o(hp," by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli."),hp.forEach(t),yS=s(c),pn=a(c,"LI",{});var cp=l(pn);hm=a(cp,"STRONG",{});var tle=l(hm);ah=a(tle,"A",{href:!0});var rle=l(ah);bS=o(rle,"Wav2Vec2Phoneme"),rle.forEach(t),tle.forEach(t),LS=o(cp," (from Facebook AI) released with the paper "),Bi=a(cp,"A",{href:!0,rel:!0});var ale=l(Bi);SS=o(ale,"Simple and Effective Zero-shot Cross-lingual Phoneme Recognition"),ale.forEach(t),wS=o(cp," by Qiantong Xu, Alexei Baevski, Michael Auli."),cp.forEach(t),MS=s(c),Dn=a(c,"LI",{});var gp=l(Dn);cm=a(gp,"STRONG",{});var lle=l(cm);lh=a(lle,"A",{href:!0});var nle=l(lh);PS=o(nle,"XGLM"),nle.forEach(t),lle.forEach(t),GS=o(gp," (From Facebook AI) released with the paper "),xi=a(gp,"A",{href:!0,rel:!0});var ole=l(xi);NS=o(ole,"Few-shot Learning with Multilingual Language Models"),ole.forEach(t),kS=o(gp," by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti 
Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O\u2019Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li."),gp.forEach(t),CS=s(c),An=a(c,"LI",{});var fp=l(An);gm=a(fp,"STRONG",{});var ile=l(gm);nh=a(ile,"A",{href:!0});var dle=l(nh);IS=o(dle,"XLM"),dle.forEach(t),ile.forEach(t),BS=o(fp," (from Facebook) released together with the paper "),Oi=a(fp,"A",{href:!0,rel:!0});var sle=l(Oi);xS=o(sle,"Cross-lingual Language Model Pretraining"),sle.forEach(t),OS=o(fp," by Guillaume Lample and Alexis Conneau."),fp.forEach(t),FS=s(c),Rn=a(c,"LI",{});var up=l(Rn);fm=a(up,"STRONG",{});var hle=l(fm);oh=a(hle,"A",{href:!0});var cle=l(oh);WS=o(cle,"XLM-ProphetNet"),cle.forEach(t),hle.forEach(t),JS=o(up," (from Microsoft Research) released with the paper "),Fi=a(up,"A",{href:!0,rel:!0});var gle=l(Fi);YS=o(gle,"ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training"),gle.forEach(t),KS=o(up," by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou."),up.forEach(t),VS=s(c),ma=a(c,"LI",{});var ji=l(ma);um=a(ji,"STRONG",{});var fle=l(um);ih=a(fle,"A",{href:!0});var ule=l(ih);ZS=o(ule,"XLM-RoBERTa"),ule.forEach(t),fle.forEach(t),XS=o(ji," (from Facebook AI), released together with the paper "),Wi=a(ji,"A",{href:!0,rel:!0});var vle=l(Wi);zS=o(vle,"Unsupervised Cross-lingual Representation Learning at Scale"),vle.forEach(t),HS=o(ji," by Alexis Conneau"),vm=a(ji,"EM",{});var Ele=l(vm);US=o(Ele,", Kartikay Khandelwal"),Ele.forEach(t),qS=o(ji,", Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\xE1n, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov."),ji.forEach(t),jS=s(c),yn=a(c,"LI",{});var vp=l(yn);Em=a(vp,"STRONG",{});var Tle=l(Em);dh=a(Tle,"A",{href:!0});var _le=l(dh);QS=o(_le,"XLM-RoBERTa-XL"),_le.forEach(t),Tle.forEach(t),$S=o(vp," (from Facebook AI), released together with the paper 
"),Ji=a(vp,"A",{href:!0,rel:!0});var mle=l(Ji);ew=o(mle,"Larger-Scale Transformers for Multilingual Masked Language Modeling"),mle.forEach(t),tw=o(vp," by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau."),vp.forEach(t),rw=s(c),pa=a(c,"LI",{});var Qi=l(pa);Tm=a(Qi,"STRONG",{});var ple=l(Tm);sh=a(ple,"A",{href:!0});var Dle=l(sh);aw=o(Dle,"XLNet"),Dle.forEach(t),ple.forEach(t),lw=o(Qi," (from Google/CMU) released with the paper "),Yi=a(Qi,"A",{href:!0,rel:!0});var Ale=l(Yi);nw=o(Ale,"\u200BXLNet: Generalized Autoregressive Pretraining for Language Understanding"),Ale.forEach(t),ow=o(Qi," by Zhilin Yang"),_m=a(Qi,"EM",{});var Rle=l(_m);iw=o(Rle,", Zihang Dai"),Rle.forEach(t),dw=o(Qi,", Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le."),Qi.forEach(t),sw=s(c),bn=a(c,"LI",{});var Ep=l(bn);mm=a(Ep,"STRONG",{});var yle=l(mm);hh=a(yle,"A",{href:!0});var ble=l(hh);hw=o(ble,"XLSR-Wav2Vec2"),ble.forEach(t),yle.forEach(t),cw=o(Ep," (from Facebook AI) released with the paper "),Ki=a(Ep,"A",{href:!0,rel:!0});var Lle=l(Ki);gw=o(Lle,"Unsupervised Cross-Lingual Representation Learning For Speech Recognition"),Lle.forEach(t),fw=o(Ep," by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli."),Ep.forEach(t),uw=s(c),Ln=a(c,"LI",{});var Tp=l(Ln);pm=a(Tp,"STRONG",{});var Sle=l(pm);ch=a(Sle,"A",{href:!0});var wle=l(ch);vw=o(wle,"XLS-R"),wle.forEach(t),Sle.forEach(t),Ew=o(Tp," (from Facebook AI) released with the paper "),Vi=a(Tp,"A",{href:!0,rel:!0});var Mle=l(Vi);Tw=o(Mle,"XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale"),Mle.forEach(t),_w=o(Tp," by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli."),Tp.forEach(t),mw=s(c),Sn=a(c,"LI",{});var _p=l(Sn);Dm=a(_p,"STRONG",{});var Ple=l(Dm);gh=a(Ple,"A",{href:!0});var 
Gle=l(gh);pw=o(Gle,"YOSO"),Gle.forEach(t),Ple.forEach(t),Dw=o(_p," (from the University of Wisconsin - Madison) released with the paper "),Zi=a(_p,"A",{href:!0,rel:!0});var Nle=l(Zi);Aw=o(Nle,"You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling"),Nle.forEach(t),Rw=o(_p," by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh."),_p.forEach(t),c.forEach(t),Wp=s(u),Sa=a(u,"H3",{class:!0});var Hp=l(Sa);wn=a(Hp,"A",{id:!0,class:!0,href:!0});var kle=l(wn);Am=a(kle,"SPAN",{});var Cle=l(Am);pp(Xi.$$.fragment,Cle),Cle.forEach(t),kle.forEach(t),yw=s(Hp),Rm=a(Hp,"SPAN",{});var Ile=l(Rm);bw=o(Ile,"Frameworks aceitos"),Ile.forEach(t),Hp.forEach(t),Jp=s(u),fh=a(u,"P",{});var Ble=l(fh);Lw=o(Ble,`A tabela abaixo representa a lista de suporte na biblioteca para cada um dos seguintes modelos, caso tenham um tokenizer do Python (chamado de \u201Cslow\u201D), ou um tokenizer constru\xEDdo em cima da biblioteca \u{1F917} Tokenizers (chamado de \u201Cfast\u201D). 
Al\xE9m disso, s\xE3o diferenciados pelo suporte em diferentes frameworks: JAX (por meio do Flax); PyTorch; e/ou Tensorflow.`),Ble.forEach(t),Yp=s(u),Mn=a(u,"TABLE",{});var Up=l(Mn);ym=a(Up,"THEAD",{});var xle=l(ym);m=a(xle,"TR",{});var _t=l(m);uh=a(_t,"TH",{align:!0});var Ole=l(uh);Sw=o(Ole,"Model"),Ole.forEach(t),ww=s(_t),vh=a(_t,"TH",{align:!0});var Fle=l(vh);Mw=o(Fle,"Tokenizer slow"),Fle.forEach(t),Pw=s(_t),Eh=a(_t,"TH",{align:!0});var Wle=l(Eh);Gw=o(Wle,"Tokenizer fast"),Wle.forEach(t),Nw=s(_t),Th=a(_t,"TH",{align:!0});var Jle=l(Th);kw=o(Jle,"PyTorch support"),Jle.forEach(t),Cw=s(_t),_h=a(_t,"TH",{align:!0});var Yle=l(_h);Iw=o(Yle,"TensorFlow support"),Yle.forEach(t),Bw=s(_t),mh=a(_t,"TH",{align:!0});var Kle=l(mh);xw=o(Kle,"Flax Support"),Kle.forEach(t),_t.forEach(t),xle.forEach(t),Ow=s(Up),g=a(Up,"TBODY",{});var f=l(g);p=a(f,"TR",{});var mt=l(p);ph=a(mt,"TD",{align:!0});var Vle=l(ph);Fw=o(Vle,"ALBERT"),Vle.forEach(t),Ww=s(mt),Dh=a(mt,"TD",{align:!0});var Zle=l(Dh);Jw=o(Zle,"\u2705"),Zle.forEach(t),Yw=s(mt),Ah=a(mt,"TD",{align:!0});var Xle=l(Ah);Kw=o(Xle,"\u2705"),Xle.forEach(t),Vw=s(mt),Rh=a(mt,"TD",{align:!0});var zle=l(Rh);Zw=o(zle,"\u2705"),zle.forEach(t),Xw=s(mt),yh=a(mt,"TD",{align:!0});var Hle=l(yh);zw=o(Hle,"\u2705"),Hle.forEach(t),Hw=s(mt),bh=a(mt,"TD",{align:!0});var Ule=l(bh);Uw=o(Ule,"\u2705"),Ule.forEach(t),mt.forEach(t),qw=s(f),D=a(f,"TR",{});var pt=l(D);Lh=a(pt,"TD",{align:!0});var qle=l(Lh);jw=o(qle,"BART"),qle.forEach(t),Qw=s(pt),Sh=a(pt,"TD",{align:!0});var jle=l(Sh);$w=o(jle,"\u2705"),jle.forEach(t),eM=s(pt),wh=a(pt,"TD",{align:!0});var Qle=l(wh);tM=o(Qle,"\u2705"),Qle.forEach(t),rM=s(pt),Mh=a(pt,"TD",{align:!0});var $le=l(Mh);aM=o($le,"\u2705"),$le.forEach(t),lM=s(pt),Ph=a(pt,"TD",{align:!0});var ene=l(Ph);nM=o(ene,"\u2705"),ene.forEach(t),oM=s(pt),Gh=a(pt,"TD",{align:!0});var tne=l(Gh);iM=o(tne,"\u2705"),tne.forEach(t),pt.forEach(t),dM=s(f),A=a(f,"TR",{});var Dt=l(A);Nh=a(Dt,"TD",{align:!0});var 
rne=l(Nh);sM=o(rne,"BEiT"),rne.forEach(t),hM=s(Dt),kh=a(Dt,"TD",{align:!0});var ane=l(kh);cM=o(ane,"\u274C"),ane.forEach(t),gM=s(Dt),Ch=a(Dt,"TD",{align:!0});var lne=l(Ch);fM=o(lne,"\u274C"),lne.forEach(t),uM=s(Dt),Ih=a(Dt,"TD",{align:!0});var nne=l(Ih);vM=o(nne,"\u2705"),nne.forEach(t),EM=s(Dt),Bh=a(Dt,"TD",{align:!0});var one=l(Bh);TM=o(one,"\u274C"),one.forEach(t),_M=s(Dt),xh=a(Dt,"TD",{align:!0});var ine=l(xh);mM=o(ine,"\u2705"),ine.forEach(t),Dt.forEach(t),pM=s(f),R=a(f,"TR",{});var At=l(R);Oh=a(At,"TD",{align:!0});var dne=l(Oh);DM=o(dne,"BERT"),dne.forEach(t),AM=s(At),Fh=a(At,"TD",{align:!0});var sne=l(Fh);RM=o(sne,"\u2705"),sne.forEach(t),yM=s(At),Wh=a(At,"TD",{align:!0});var hne=l(Wh);bM=o(hne,"\u2705"),hne.forEach(t),LM=s(At),Jh=a(At,"TD",{align:!0});var cne=l(Jh);SM=o(cne,"\u2705"),cne.forEach(t),wM=s(At),Yh=a(At,"TD",{align:!0});var gne=l(Yh);MM=o(gne,"\u2705"),gne.forEach(t),PM=s(At),Kh=a(At,"TD",{align:!0});var fne=l(Kh);GM=o(fne,"\u2705"),fne.forEach(t),At.forEach(t),NM=s(f),y=a(f,"TR",{});var Rt=l(y);Vh=a(Rt,"TD",{align:!0});var une=l(Vh);kM=o(une,"Bert Generation"),une.forEach(t),CM=s(Rt),Zh=a(Rt,"TD",{align:!0});var vne=l(Zh);IM=o(vne,"\u2705"),vne.forEach(t),BM=s(Rt),Xh=a(Rt,"TD",{align:!0});var Ene=l(Xh);xM=o(Ene,"\u274C"),Ene.forEach(t),OM=s(Rt),zh=a(Rt,"TD",{align:!0});var Tne=l(zh);FM=o(Tne,"\u2705"),Tne.forEach(t),WM=s(Rt),Hh=a(Rt,"TD",{align:!0});var _ne=l(Hh);JM=o(_ne,"\u274C"),_ne.forEach(t),YM=s(Rt),Uh=a(Rt,"TD",{align:!0});var mne=l(Uh);KM=o(mne,"\u274C"),mne.forEach(t),Rt.forEach(t),VM=s(f),b=a(f,"TR",{});var yt=l(b);qh=a(yt,"TD",{align:!0});var pne=l(qh);ZM=o(pne,"BigBird"),pne.forEach(t),XM=s(yt),jh=a(yt,"TD",{align:!0});var Dne=l(jh);zM=o(Dne,"\u2705"),Dne.forEach(t),HM=s(yt),Qh=a(yt,"TD",{align:!0});var Ane=l(Qh);UM=o(Ane,"\u2705"),Ane.forEach(t),qM=s(yt),$h=a(yt,"TD",{align:!0});var Rne=l($h);jM=o(Rne,"\u2705"),Rne.forEach(t),QM=s(yt),ec=a(yt,"TD",{align:!0});var 
yne=l(ec);$M=o(yne,"\u274C"),yne.forEach(t),eP=s(yt),tc=a(yt,"TD",{align:!0});var bne=l(tc);tP=o(bne,"\u2705"),bne.forEach(t),yt.forEach(t),rP=s(f),L=a(f,"TR",{});var bt=l(L);rc=a(bt,"TD",{align:!0});var Lne=l(rc);aP=o(Lne,"BigBirdPegasus"),Lne.forEach(t),lP=s(bt),ac=a(bt,"TD",{align:!0});var Sne=l(ac);nP=o(Sne,"\u274C"),Sne.forEach(t),oP=s(bt),lc=a(bt,"TD",{align:!0});var wne=l(lc);iP=o(wne,"\u274C"),wne.forEach(t),dP=s(bt),nc=a(bt,"TD",{align:!0});var Mne=l(nc);sP=o(Mne,"\u2705"),Mne.forEach(t),hP=s(bt),oc=a(bt,"TD",{align:!0});var Pne=l(oc);cP=o(Pne,"\u274C"),Pne.forEach(t),gP=s(bt),ic=a(bt,"TD",{align:!0});var Gne=l(ic);fP=o(Gne,"\u274C"),Gne.forEach(t),bt.forEach(t),uP=s(f),S=a(f,"TR",{});var Lt=l(S);dc=a(Lt,"TD",{align:!0});var Nne=l(dc);vP=o(Nne,"Blenderbot"),Nne.forEach(t),EP=s(Lt),sc=a(Lt,"TD",{align:!0});var kne=l(sc);TP=o(kne,"\u2705"),kne.forEach(t),_P=s(Lt),hc=a(Lt,"TD",{align:!0});var Cne=l(hc);mP=o(Cne,"\u2705"),Cne.forEach(t),pP=s(Lt),cc=a(Lt,"TD",{align:!0});var Ine=l(cc);DP=o(Ine,"\u2705"),Ine.forEach(t),AP=s(Lt),gc=a(Lt,"TD",{align:!0});var Bne=l(gc);RP=o(Bne,"\u2705"),Bne.forEach(t),yP=s(Lt),fc=a(Lt,"TD",{align:!0});var xne=l(fc);bP=o(xne,"\u2705"),xne.forEach(t),Lt.forEach(t),LP=s(f),w=a(f,"TR",{});var St=l(w);uc=a(St,"TD",{align:!0});var One=l(uc);SP=o(One,"BlenderbotSmall"),One.forEach(t),wP=s(St),vc=a(St,"TD",{align:!0});var Fne=l(vc);MP=o(Fne,"\u2705"),Fne.forEach(t),PP=s(St),Ec=a(St,"TD",{align:!0});var Wne=l(Ec);GP=o(Wne,"\u2705"),Wne.forEach(t),NP=s(St),Tc=a(St,"TD",{align:!0});var Jne=l(Tc);kP=o(Jne,"\u2705"),Jne.forEach(t),CP=s(St),_c=a(St,"TD",{align:!0});var Yne=l(_c);IP=o(Yne,"\u2705"),Yne.forEach(t),BP=s(St),mc=a(St,"TD",{align:!0});var Kne=l(mc);xP=o(Kne,"\u2705"),Kne.forEach(t),St.forEach(t),OP=s(f),M=a(f,"TR",{});var wt=l(M);pc=a(wt,"TD",{align:!0});var Vne=l(pc);FP=o(Vne,"CamemBERT"),Vne.forEach(t),WP=s(wt),Dc=a(wt,"TD",{align:!0});var Zne=l(Dc);JP=o(Zne,"\u2705"),Zne.forEach(t),YP=s(wt),Ac=a(wt,"TD",{align:!0});var 
Xne=l(Ac);KP=o(Xne,"\u2705"),Xne.forEach(t),VP=s(wt),Rc=a(wt,"TD",{align:!0});var zne=l(Rc);ZP=o(zne,"\u2705"),zne.forEach(t),XP=s(wt),yc=a(wt,"TD",{align:!0});var Hne=l(yc);zP=o(Hne,"\u2705"),Hne.forEach(t),HP=s(wt),bc=a(wt,"TD",{align:!0});var Une=l(bc);UP=o(Une,"\u274C"),Une.forEach(t),wt.forEach(t),qP=s(f),P=a(f,"TR",{});var Mt=l(P);Lc=a(Mt,"TD",{align:!0});var qne=l(Lc);jP=o(qne,"Canine"),qne.forEach(t),QP=s(Mt),Sc=a(Mt,"TD",{align:!0});var jne=l(Sc);$P=o(jne,"\u2705"),jne.forEach(t),eG=s(Mt),wc=a(Mt,"TD",{align:!0});var Qne=l(wc);tG=o(Qne,"\u274C"),Qne.forEach(t),rG=s(Mt),Mc=a(Mt,"TD",{align:!0});var $ne=l(Mc);aG=o($ne,"\u2705"),$ne.forEach(t),lG=s(Mt),Pc=a(Mt,"TD",{align:!0});var eoe=l(Pc);nG=o(eoe,"\u274C"),eoe.forEach(t),oG=s(Mt),Gc=a(Mt,"TD",{align:!0});var toe=l(Gc);iG=o(toe,"\u274C"),toe.forEach(t),Mt.forEach(t),dG=s(f),G=a(f,"TR",{});var Pt=l(G);Nc=a(Pt,"TD",{align:!0});var roe=l(Nc);sG=o(roe,"CLIP"),roe.forEach(t),hG=s(Pt),kc=a(Pt,"TD",{align:!0});var aoe=l(kc);cG=o(aoe,"\u2705"),aoe.forEach(t),gG=s(Pt),Cc=a(Pt,"TD",{align:!0});var loe=l(Cc);fG=o(loe,"\u2705"),loe.forEach(t),uG=s(Pt),Ic=a(Pt,"TD",{align:!0});var noe=l(Ic);vG=o(noe,"\u2705"),noe.forEach(t),EG=s(Pt),Bc=a(Pt,"TD",{align:!0});var ooe=l(Bc);TG=o(ooe,"\u2705"),ooe.forEach(t),_G=s(Pt),xc=a(Pt,"TD",{align:!0});var ioe=l(xc);mG=o(ioe,"\u2705"),ioe.forEach(t),Pt.forEach(t),pG=s(f),N=a(f,"TR",{});var Gt=l(N);Oc=a(Gt,"TD",{align:!0});var doe=l(Oc);DG=o(doe,"ConvBERT"),doe.forEach(t),AG=s(Gt),Fc=a(Gt,"TD",{align:!0});var soe=l(Fc);RG=o(soe,"\u2705"),soe.forEach(t),yG=s(Gt),Wc=a(Gt,"TD",{align:!0});var hoe=l(Wc);bG=o(hoe,"\u2705"),hoe.forEach(t),LG=s(Gt),Jc=a(Gt,"TD",{align:!0});var coe=l(Jc);SG=o(coe,"\u2705"),coe.forEach(t),wG=s(Gt),Yc=a(Gt,"TD",{align:!0});var goe=l(Yc);MG=o(goe,"\u2705"),goe.forEach(t),PG=s(Gt),Kc=a(Gt,"TD",{align:!0});var foe=l(Kc);GG=o(foe,"\u274C"),foe.forEach(t),Gt.forEach(t),NG=s(f),k=a(f,"TR",{});var Nt=l(k);Vc=a(Nt,"TD",{align:!0});var 
uoe=l(Vc);kG=o(uoe,"ConvNext"),uoe.forEach(t),CG=s(Nt),Zc=a(Nt,"TD",{align:!0});var voe=l(Zc);IG=o(voe,"\u274C"),voe.forEach(t),BG=s(Nt),Xc=a(Nt,"TD",{align:!0});var Eoe=l(Xc);xG=o(Eoe,"\u274C"),Eoe.forEach(t),OG=s(Nt),zc=a(Nt,"TD",{align:!0});var Toe=l(zc);FG=o(Toe,"\u2705"),Toe.forEach(t),WG=s(Nt),Hc=a(Nt,"TD",{align:!0});var _oe=l(Hc);JG=o(_oe,"\u2705"),_oe.forEach(t),YG=s(Nt),Uc=a(Nt,"TD",{align:!0});var moe=l(Uc);KG=o(moe,"\u274C"),moe.forEach(t),Nt.forEach(t),VG=s(f),C=a(f,"TR",{});var kt=l(C);qc=a(kt,"TD",{align:!0});var poe=l(qc);ZG=o(poe,"CTRL"),poe.forEach(t),XG=s(kt),jc=a(kt,"TD",{align:!0});var Doe=l(jc);zG=o(Doe,"\u2705"),Doe.forEach(t),HG=s(kt),Qc=a(kt,"TD",{align:!0});var Aoe=l(Qc);UG=o(Aoe,"\u274C"),Aoe.forEach(t),qG=s(kt),$c=a(kt,"TD",{align:!0});var Roe=l($c);jG=o(Roe,"\u2705"),Roe.forEach(t),QG=s(kt),eg=a(kt,"TD",{align:!0});var yoe=l(eg);$G=o(yoe,"\u2705"),yoe.forEach(t),eN=s(kt),tg=a(kt,"TD",{align:!0});var boe=l(tg);tN=o(boe,"\u274C"),boe.forEach(t),kt.forEach(t),rN=s(f),I=a(f,"TR",{});var Ct=l(I);rg=a(Ct,"TD",{align:!0});var Loe=l(rg);aN=o(Loe,"Data2VecAudio"),Loe.forEach(t),lN=s(Ct),ag=a(Ct,"TD",{align:!0});var Soe=l(ag);nN=o(Soe,"\u274C"),Soe.forEach(t),oN=s(Ct),lg=a(Ct,"TD",{align:!0});var woe=l(lg);iN=o(woe,"\u274C"),woe.forEach(t),dN=s(Ct),ng=a(Ct,"TD",{align:!0});var Moe=l(ng);sN=o(Moe,"\u2705"),Moe.forEach(t),hN=s(Ct),og=a(Ct,"TD",{align:!0});var Poe=l(og);cN=o(Poe,"\u274C"),Poe.forEach(t),gN=s(Ct),ig=a(Ct,"TD",{align:!0});var Goe=l(ig);fN=o(Goe,"\u274C"),Goe.forEach(t),Ct.forEach(t),uN=s(f),B=a(f,"TR",{});var It=l(B);dg=a(It,"TD",{align:!0});var Noe=l(dg);vN=o(Noe,"Data2VecText"),Noe.forEach(t),EN=s(It),sg=a(It,"TD",{align:!0});var koe=l(sg);TN=o(koe,"\u274C"),koe.forEach(t),_N=s(It),hg=a(It,"TD",{align:!0});var Coe=l(hg);mN=o(Coe,"\u274C"),Coe.forEach(t),pN=s(It),cg=a(It,"TD",{align:!0});var Ioe=l(cg);DN=o(Ioe,"\u2705"),Ioe.forEach(t),AN=s(It),gg=a(It,"TD",{align:!0});var 
Boe=l(gg);RN=o(Boe,"\u274C"),Boe.forEach(t),yN=s(It),fg=a(It,"TD",{align:!0});var xoe=l(fg);bN=o(xoe,"\u274C"),xoe.forEach(t),It.forEach(t),LN=s(f),x=a(f,"TR",{});var Bt=l(x);ug=a(Bt,"TD",{align:!0});var Ooe=l(ug);SN=o(Ooe,"Data2VecVision"),Ooe.forEach(t),wN=s(Bt),vg=a(Bt,"TD",{align:!0});var Foe=l(vg);MN=o(Foe,"\u274C"),Foe.forEach(t),PN=s(Bt),Eg=a(Bt,"TD",{align:!0});var Woe=l(Eg);GN=o(Woe,"\u274C"),Woe.forEach(t),NN=s(Bt),Tg=a(Bt,"TD",{align:!0});var Joe=l(Tg);kN=o(Joe,"\u2705"),Joe.forEach(t),CN=s(Bt),_g=a(Bt,"TD",{align:!0});var Yoe=l(_g);IN=o(Yoe,"\u274C"),Yoe.forEach(t),BN=s(Bt),mg=a(Bt,"TD",{align:!0});var Koe=l(mg);xN=o(Koe,"\u274C"),Koe.forEach(t),Bt.forEach(t),ON=s(f),O=a(f,"TR",{});var xt=l(O);pg=a(xt,"TD",{align:!0});var Voe=l(pg);FN=o(Voe,"DeBERTa"),Voe.forEach(t),WN=s(xt),Dg=a(xt,"TD",{align:!0});var Zoe=l(Dg);JN=o(Zoe,"\u2705"),Zoe.forEach(t),YN=s(xt),Ag=a(xt,"TD",{align:!0});var Xoe=l(Ag);KN=o(Xoe,"\u2705"),Xoe.forEach(t),VN=s(xt),Rg=a(xt,"TD",{align:!0});var zoe=l(Rg);ZN=o(zoe,"\u2705"),zoe.forEach(t),XN=s(xt),yg=a(xt,"TD",{align:!0});var Hoe=l(yg);zN=o(Hoe,"\u2705"),Hoe.forEach(t),HN=s(xt),bg=a(xt,"TD",{align:!0});var Uoe=l(bg);UN=o(Uoe,"\u274C"),Uoe.forEach(t),xt.forEach(t),qN=s(f),F=a(f,"TR",{});var Ot=l(F);Lg=a(Ot,"TD",{align:!0});var qoe=l(Lg);jN=o(qoe,"DeBERTa-v2"),qoe.forEach(t),QN=s(Ot),Sg=a(Ot,"TD",{align:!0});var joe=l(Sg);$N=o(joe,"\u2705"),joe.forEach(t),ek=s(Ot),wg=a(Ot,"TD",{align:!0});var Qoe=l(wg);tk=o(Qoe,"\u2705"),Qoe.forEach(t),rk=s(Ot),Mg=a(Ot,"TD",{align:!0});var $oe=l(Mg);ak=o($oe,"\u2705"),$oe.forEach(t),lk=s(Ot),Pg=a(Ot,"TD",{align:!0});var eie=l(Pg);nk=o(eie,"\u2705"),eie.forEach(t),ok=s(Ot),Gg=a(Ot,"TD",{align:!0});var tie=l(Gg);ik=o(tie,"\u274C"),tie.forEach(t),Ot.forEach(t),dk=s(f),W=a(f,"TR",{});var Ft=l(W);Ng=a(Ft,"TD",{align:!0});var rie=l(Ng);sk=o(rie,"Decision Transformer"),rie.forEach(t),hk=s(Ft),kg=a(Ft,"TD",{align:!0});var aie=l(kg);ck=o(aie,"\u274C"),aie.forEach(t),gk=s(Ft),Cg=a(Ft,"TD",{align:!0});var 
lie=l(Cg);fk=o(lie,"\u274C"),lie.forEach(t),uk=s(Ft),Ig=a(Ft,"TD",{align:!0});var nie=l(Ig);vk=o(nie,"\u2705"),nie.forEach(t),Ek=s(Ft),Bg=a(Ft,"TD",{align:!0});var oie=l(Bg);Tk=o(oie,"\u274C"),oie.forEach(t),_k=s(Ft),xg=a(Ft,"TD",{align:!0});var iie=l(xg);mk=o(iie,"\u274C"),iie.forEach(t),Ft.forEach(t),pk=s(f),J=a(f,"TR",{});var Wt=l(J);Og=a(Wt,"TD",{align:!0});var die=l(Og);Dk=o(die,"DeiT"),die.forEach(t),Ak=s(Wt),Fg=a(Wt,"TD",{align:!0});var sie=l(Fg);Rk=o(sie,"\u274C"),sie.forEach(t),yk=s(Wt),Wg=a(Wt,"TD",{align:!0});var hie=l(Wg);bk=o(hie,"\u274C"),hie.forEach(t),Lk=s(Wt),Jg=a(Wt,"TD",{align:!0});var cie=l(Jg);Sk=o(cie,"\u2705"),cie.forEach(t),wk=s(Wt),Yg=a(Wt,"TD",{align:!0});var gie=l(Yg);Mk=o(gie,"\u274C"),gie.forEach(t),Pk=s(Wt),Kg=a(Wt,"TD",{align:!0});var fie=l(Kg);Gk=o(fie,"\u274C"),fie.forEach(t),Wt.forEach(t),Nk=s(f),Y=a(f,"TR",{});var Jt=l(Y);Vg=a(Jt,"TD",{align:!0});var uie=l(Vg);kk=o(uie,"DETR"),uie.forEach(t),Ck=s(Jt),Zg=a(Jt,"TD",{align:!0});var vie=l(Zg);Ik=o(vie,"\u274C"),vie.forEach(t),Bk=s(Jt),Xg=a(Jt,"TD",{align:!0});var Eie=l(Xg);xk=o(Eie,"\u274C"),Eie.forEach(t),Ok=s(Jt),zg=a(Jt,"TD",{align:!0});var Tie=l(zg);Fk=o(Tie,"\u2705"),Tie.forEach(t),Wk=s(Jt),Hg=a(Jt,"TD",{align:!0});var _ie=l(Hg);Jk=o(_ie,"\u274C"),_ie.forEach(t),Yk=s(Jt),Ug=a(Jt,"TD",{align:!0});var mie=l(Ug);Kk=o(mie,"\u274C"),mie.forEach(t),Jt.forEach(t),Vk=s(f),K=a(f,"TR",{});var Yt=l(K);qg=a(Yt,"TD",{align:!0});var pie=l(qg);Zk=o(pie,"DistilBERT"),pie.forEach(t),Xk=s(Yt),jg=a(Yt,"TD",{align:!0});var Die=l(jg);zk=o(Die,"\u2705"),Die.forEach(t),Hk=s(Yt),Qg=a(Yt,"TD",{align:!0});var Aie=l(Qg);Uk=o(Aie,"\u2705"),Aie.forEach(t),qk=s(Yt),$g=a(Yt,"TD",{align:!0});var Rie=l($g);jk=o(Rie,"\u2705"),Rie.forEach(t),Qk=s(Yt),e1=a(Yt,"TD",{align:!0});var yie=l(e1);$k=o(yie,"\u2705"),yie.forEach(t),eC=s(Yt),t1=a(Yt,"TD",{align:!0});var bie=l(t1);tC=o(bie,"\u2705"),bie.forEach(t),Yt.forEach(t),rC=s(f),V=a(f,"TR",{});var Kt=l(V);r1=a(Kt,"TD",{align:!0});var 
Lie=l(r1);aC=o(Lie,"DPR"),Lie.forEach(t),lC=s(Kt),a1=a(Kt,"TD",{align:!0});var Sie=l(a1);nC=o(Sie,"\u2705"),Sie.forEach(t),oC=s(Kt),l1=a(Kt,"TD",{align:!0});var wie=l(l1);iC=o(wie,"\u2705"),wie.forEach(t),dC=s(Kt),n1=a(Kt,"TD",{align:!0});var Mie=l(n1);sC=o(Mie,"\u2705"),Mie.forEach(t),hC=s(Kt),o1=a(Kt,"TD",{align:!0});var Pie=l(o1);cC=o(Pie,"\u2705"),Pie.forEach(t),gC=s(Kt),i1=a(Kt,"TD",{align:!0});var Gie=l(i1);fC=o(Gie,"\u274C"),Gie.forEach(t),Kt.forEach(t),uC=s(f),Z=a(f,"TR",{});var Vt=l(Z);d1=a(Vt,"TD",{align:!0});var Nie=l(d1);vC=o(Nie,"DPT"),Nie.forEach(t),EC=s(Vt),s1=a(Vt,"TD",{align:!0});var kie=l(s1);TC=o(kie,"\u274C"),kie.forEach(t),_C=s(Vt),h1=a(Vt,"TD",{align:!0});var Cie=l(h1);mC=o(Cie,"\u274C"),Cie.forEach(t),pC=s(Vt),c1=a(Vt,"TD",{align:!0});var Iie=l(c1);DC=o(Iie,"\u2705"),Iie.forEach(t),AC=s(Vt),g1=a(Vt,"TD",{align:!0});var Bie=l(g1);RC=o(Bie,"\u274C"),Bie.forEach(t),yC=s(Vt),f1=a(Vt,"TD",{align:!0});var xie=l(f1);bC=o(xie,"\u274C"),xie.forEach(t),Vt.forEach(t),LC=s(f),X=a(f,"TR",{});var Zt=l(X);u1=a(Zt,"TD",{align:!0});var Oie=l(u1);SC=o(Oie,"ELECTRA"),Oie.forEach(t),wC=s(Zt),v1=a(Zt,"TD",{align:!0});var Fie=l(v1);MC=o(Fie,"\u2705"),Fie.forEach(t),PC=s(Zt),E1=a(Zt,"TD",{align:!0});var Wie=l(E1);GC=o(Wie,"\u2705"),Wie.forEach(t),NC=s(Zt),T1=a(Zt,"TD",{align:!0});var Jie=l(T1);kC=o(Jie,"\u2705"),Jie.forEach(t),CC=s(Zt),_1=a(Zt,"TD",{align:!0});var Yie=l(_1);IC=o(Yie,"\u2705"),Yie.forEach(t),BC=s(Zt),m1=a(Zt,"TD",{align:!0});var Kie=l(m1);xC=o(Kie,"\u2705"),Kie.forEach(t),Zt.forEach(t),OC=s(f),z=a(f,"TR",{});var Xt=l(z);p1=a(Xt,"TD",{align:!0});var Vie=l(p1);FC=o(Vie,"Encoder decoder"),Vie.forEach(t),WC=s(Xt),D1=a(Xt,"TD",{align:!0});var Zie=l(D1);JC=o(Zie,"\u274C"),Zie.forEach(t),YC=s(Xt),A1=a(Xt,"TD",{align:!0});var Xie=l(A1);KC=o(Xie,"\u274C"),Xie.forEach(t),VC=s(Xt),R1=a(Xt,"TD",{align:!0});var zie=l(R1);ZC=o(zie,"\u2705"),zie.forEach(t),XC=s(Xt),y1=a(Xt,"TD",{align:!0});var 
Hie=l(y1);zC=o(Hie,"\u2705"),Hie.forEach(t),HC=s(Xt),b1=a(Xt,"TD",{align:!0});var Uie=l(b1);UC=o(Uie,"\u2705"),Uie.forEach(t),Xt.forEach(t),qC=s(f),H=a(f,"TR",{});var zt=l(H);L1=a(zt,"TD",{align:!0});var qie=l(L1);jC=o(qie,"FairSeq Machine-Translation"),qie.forEach(t),QC=s(zt),S1=a(zt,"TD",{align:!0});var jie=l(S1);$C=o(jie,"\u2705"),jie.forEach(t),eI=s(zt),w1=a(zt,"TD",{align:!0});var Qie=l(w1);tI=o(Qie,"\u274C"),Qie.forEach(t),rI=s(zt),M1=a(zt,"TD",{align:!0});var $ie=l(M1);aI=o($ie,"\u2705"),$ie.forEach(t),lI=s(zt),P1=a(zt,"TD",{align:!0});var ede=l(P1);nI=o(ede,"\u274C"),ede.forEach(t),oI=s(zt),G1=a(zt,"TD",{align:!0});var tde=l(G1);iI=o(tde,"\u274C"),tde.forEach(t),zt.forEach(t),dI=s(f),U=a(f,"TR",{});var Ht=l(U);N1=a(Ht,"TD",{align:!0});var rde=l(N1);sI=o(rde,"FlauBERT"),rde.forEach(t),hI=s(Ht),k1=a(Ht,"TD",{align:!0});var ade=l(k1);cI=o(ade,"\u2705"),ade.forEach(t),gI=s(Ht),C1=a(Ht,"TD",{align:!0});var lde=l(C1);fI=o(lde,"\u274C"),lde.forEach(t),uI=s(Ht),I1=a(Ht,"TD",{align:!0});var nde=l(I1);vI=o(nde,"\u2705"),nde.forEach(t),EI=s(Ht),B1=a(Ht,"TD",{align:!0});var ode=l(B1);TI=o(ode,"\u2705"),ode.forEach(t),_I=s(Ht),x1=a(Ht,"TD",{align:!0});var ide=l(x1);mI=o(ide,"\u274C"),ide.forEach(t),Ht.forEach(t),pI=s(f),q=a(f,"TR",{});var Ut=l(q);O1=a(Ut,"TD",{align:!0});var dde=l(O1);DI=o(dde,"FNet"),dde.forEach(t),AI=s(Ut),F1=a(Ut,"TD",{align:!0});var sde=l(F1);RI=o(sde,"\u2705"),sde.forEach(t),yI=s(Ut),W1=a(Ut,"TD",{align:!0});var hde=l(W1);bI=o(hde,"\u2705"),hde.forEach(t),LI=s(Ut),J1=a(Ut,"TD",{align:!0});var cde=l(J1);SI=o(cde,"\u2705"),cde.forEach(t),wI=s(Ut),Y1=a(Ut,"TD",{align:!0});var gde=l(Y1);MI=o(gde,"\u274C"),gde.forEach(t),PI=s(Ut),K1=a(Ut,"TD",{align:!0});var fde=l(K1);GI=o(fde,"\u274C"),fde.forEach(t),Ut.forEach(t),NI=s(f),j=a(f,"TR",{});var qt=l(j);V1=a(qt,"TD",{align:!0});var ude=l(V1);kI=o(ude,"Funnel Transformer"),ude.forEach(t),CI=s(qt),Z1=a(qt,"TD",{align:!0});var vde=l(Z1);II=o(vde,"\u2705"),vde.forEach(t),BI=s(qt),X1=a(qt,"TD",{align:!0});var 
Ede=l(X1);xI=o(Ede,"\u2705"),Ede.forEach(t),OI=s(qt),z1=a(qt,"TD",{align:!0});var Tde=l(z1);FI=o(Tde,"\u2705"),Tde.forEach(t),WI=s(qt),H1=a(qt,"TD",{align:!0});var _de=l(H1);JI=o(_de,"\u2705"),_de.forEach(t),YI=s(qt),U1=a(qt,"TD",{align:!0});var mde=l(U1);KI=o(mde,"\u274C"),mde.forEach(t),qt.forEach(t),VI=s(f),Q=a(f,"TR",{});var jt=l(Q);q1=a(jt,"TD",{align:!0});var pde=l(q1);ZI=o(pde,"GLPN"),pde.forEach(t),XI=s(jt),j1=a(jt,"TD",{align:!0});var Dde=l(j1);zI=o(Dde,"\u274C"),Dde.forEach(t),HI=s(jt),Q1=a(jt,"TD",{align:!0});var Ade=l(Q1);UI=o(Ade,"\u274C"),Ade.forEach(t),qI=s(jt),$1=a(jt,"TD",{align:!0});var Rde=l($1);jI=o(Rde,"\u2705"),Rde.forEach(t),QI=s(jt),ef=a(jt,"TD",{align:!0});var yde=l(ef);$I=o(yde,"\u274C"),yde.forEach(t),eB=s(jt),tf=a(jt,"TD",{align:!0});var bde=l(tf);tB=o(bde,"\u274C"),bde.forEach(t),jt.forEach(t),rB=s(f),$=a(f,"TR",{});var Qt=l($);rf=a(Qt,"TD",{align:!0});var Lde=l(rf);aB=o(Lde,"GPT Neo"),Lde.forEach(t),lB=s(Qt),af=a(Qt,"TD",{align:!0});var Sde=l(af);nB=o(Sde,"\u274C"),Sde.forEach(t),oB=s(Qt),lf=a(Qt,"TD",{align:!0});var wde=l(lf);iB=o(wde,"\u274C"),wde.forEach(t),dB=s(Qt),nf=a(Qt,"TD",{align:!0});var Mde=l(nf);sB=o(Mde,"\u2705"),Mde.forEach(t),hB=s(Qt),of=a(Qt,"TD",{align:!0});var Pde=l(of);cB=o(Pde,"\u274C"),Pde.forEach(t),gB=s(Qt),df=a(Qt,"TD",{align:!0});var Gde=l(df);fB=o(Gde,"\u2705"),Gde.forEach(t),Qt.forEach(t),uB=s(f),ee=a(f,"TR",{});var $t=l(ee);sf=a($t,"TD",{align:!0});var Nde=l(sf);vB=o(Nde,"GPT-J"),Nde.forEach(t),EB=s($t),hf=a($t,"TD",{align:!0});var kde=l(hf);TB=o(kde,"\u274C"),kde.forEach(t),_B=s($t),cf=a($t,"TD",{align:!0});var Cde=l(cf);mB=o(Cde,"\u274C"),Cde.forEach(t),pB=s($t),gf=a($t,"TD",{align:!0});var Ide=l(gf);DB=o(Ide,"\u2705"),Ide.forEach(t),AB=s($t),ff=a($t,"TD",{align:!0});var Bde=l(ff);RB=o(Bde,"\u2705"),Bde.forEach(t),yB=s($t),uf=a($t,"TD",{align:!0});var xde=l(uf);bB=o(xde,"\u2705"),xde.forEach(t),$t.forEach(t),LB=s(f),te=a(f,"TR",{});var er=l(te);vf=a(er,"TD",{align:!0});var 
Ode=l(vf);SB=o(Ode,"Hubert"),Ode.forEach(t),wB=s(er),Ef=a(er,"TD",{align:!0});var Fde=l(Ef);MB=o(Fde,"\u274C"),Fde.forEach(t),PB=s(er),Tf=a(er,"TD",{align:!0});var Wde=l(Tf);GB=o(Wde,"\u274C"),Wde.forEach(t),NB=s(er),_f=a(er,"TD",{align:!0});var Jde=l(_f);kB=o(Jde,"\u2705"),Jde.forEach(t),CB=s(er),mf=a(er,"TD",{align:!0});var Yde=l(mf);IB=o(Yde,"\u2705"),Yde.forEach(t),BB=s(er),pf=a(er,"TD",{align:!0});var Kde=l(pf);xB=o(Kde,"\u274C"),Kde.forEach(t),er.forEach(t),OB=s(f),re=a(f,"TR",{});var tr=l(re);Df=a(tr,"TD",{align:!0});var Vde=l(Df);FB=o(Vde,"I-BERT"),Vde.forEach(t),WB=s(tr),Af=a(tr,"TD",{align:!0});var Zde=l(Af);JB=o(Zde,"\u274C"),Zde.forEach(t),YB=s(tr),Rf=a(tr,"TD",{align:!0});var Xde=l(Rf);KB=o(Xde,"\u274C"),Xde.forEach(t),VB=s(tr),yf=a(tr,"TD",{align:!0});var zde=l(yf);ZB=o(zde,"\u2705"),zde.forEach(t),XB=s(tr),bf=a(tr,"TD",{align:!0});var Hde=l(bf);zB=o(Hde,"\u274C"),Hde.forEach(t),HB=s(tr),Lf=a(tr,"TD",{align:!0});var Ude=l(Lf);UB=o(Ude,"\u274C"),Ude.forEach(t),tr.forEach(t),qB=s(f),ae=a(f,"TR",{});var rr=l(ae);Sf=a(rr,"TD",{align:!0});var qde=l(Sf);jB=o(qde,"ImageGPT"),qde.forEach(t),QB=s(rr),wf=a(rr,"TD",{align:!0});var jde=l(wf);$B=o(jde,"\u274C"),jde.forEach(t),ex=s(rr),Mf=a(rr,"TD",{align:!0});var Qde=l(Mf);tx=o(Qde,"\u274C"),Qde.forEach(t),rx=s(rr),Pf=a(rr,"TD",{align:!0});var $de=l(Pf);ax=o($de,"\u2705"),$de.forEach(t),lx=s(rr),Gf=a(rr,"TD",{align:!0});var ese=l(Gf);nx=o(ese,"\u274C"),ese.forEach(t),ox=s(rr),Nf=a(rr,"TD",{align:!0});var tse=l(Nf);ix=o(tse,"\u274C"),tse.forEach(t),rr.forEach(t),dx=s(f),le=a(f,"TR",{});var ar=l(le);kf=a(ar,"TD",{align:!0});var rse=l(kf);sx=o(rse,"LayoutLM"),rse.forEach(t),hx=s(ar),Cf=a(ar,"TD",{align:!0});var ase=l(Cf);cx=o(ase,"\u2705"),ase.forEach(t),gx=s(ar),If=a(ar,"TD",{align:!0});var lse=l(If);fx=o(lse,"\u2705"),lse.forEach(t),ux=s(ar),Bf=a(ar,"TD",{align:!0});var nse=l(Bf);vx=o(nse,"\u2705"),nse.forEach(t),Ex=s(ar),xf=a(ar,"TD",{align:!0});var 
ose=l(xf);Tx=o(ose,"\u2705"),ose.forEach(t),_x=s(ar),Of=a(ar,"TD",{align:!0});var ise=l(Of);mx=o(ise,"\u274C"),ise.forEach(t),ar.forEach(t),px=s(f),ne=a(f,"TR",{});var lr=l(ne);Ff=a(lr,"TD",{align:!0});var dse=l(Ff);Dx=o(dse,"LayoutLMv2"),dse.forEach(t),Ax=s(lr),Wf=a(lr,"TD",{align:!0});var sse=l(Wf);Rx=o(sse,"\u2705"),sse.forEach(t),yx=s(lr),Jf=a(lr,"TD",{align:!0});var hse=l(Jf);bx=o(hse,"\u2705"),hse.forEach(t),Lx=s(lr),Yf=a(lr,"TD",{align:!0});var cse=l(Yf);Sx=o(cse,"\u2705"),cse.forEach(t),wx=s(lr),Kf=a(lr,"TD",{align:!0});var gse=l(Kf);Mx=o(gse,"\u274C"),gse.forEach(t),Px=s(lr),Vf=a(lr,"TD",{align:!0});var fse=l(Vf);Gx=o(fse,"\u274C"),fse.forEach(t),lr.forEach(t),Nx=s(f),oe=a(f,"TR",{});var nr=l(oe);Zf=a(nr,"TD",{align:!0});var use=l(Zf);kx=o(use,"LED"),use.forEach(t),Cx=s(nr),Xf=a(nr,"TD",{align:!0});var vse=l(Xf);Ix=o(vse,"\u2705"),vse.forEach(t),Bx=s(nr),zf=a(nr,"TD",{align:!0});var Ese=l(zf);xx=o(Ese,"\u2705"),Ese.forEach(t),Ox=s(nr),Hf=a(nr,"TD",{align:!0});var Tse=l(Hf);Fx=o(Tse,"\u2705"),Tse.forEach(t),Wx=s(nr),Uf=a(nr,"TD",{align:!0});var _se=l(Uf);Jx=o(_se,"\u2705"),_se.forEach(t),Yx=s(nr),qf=a(nr,"TD",{align:!0});var mse=l(qf);Kx=o(mse,"\u274C"),mse.forEach(t),nr.forEach(t),Vx=s(f),ie=a(f,"TR",{});var or=l(ie);jf=a(or,"TD",{align:!0});var pse=l(jf);Zx=o(pse,"Longformer"),pse.forEach(t),Xx=s(or),Qf=a(or,"TD",{align:!0});var Dse=l(Qf);zx=o(Dse,"\u2705"),Dse.forEach(t),Hx=s(or),$f=a(or,"TD",{align:!0});var Ase=l($f);Ux=o(Ase,"\u2705"),Ase.forEach(t),qx=s(or),eu=a(or,"TD",{align:!0});var Rse=l(eu);jx=o(Rse,"\u2705"),Rse.forEach(t),Qx=s(or),tu=a(or,"TD",{align:!0});var yse=l(tu);$x=o(yse,"\u2705"),yse.forEach(t),eO=s(or),ru=a(or,"TD",{align:!0});var bse=l(ru);tO=o(bse,"\u274C"),bse.forEach(t),or.forEach(t),rO=s(f),de=a(f,"TR",{});var ir=l(de);au=a(ir,"TD",{align:!0});var Lse=l(au);aO=o(Lse,"LUKE"),Lse.forEach(t),lO=s(ir),lu=a(ir,"TD",{align:!0});var Sse=l(lu);nO=o(Sse,"\u2705"),Sse.forEach(t),oO=s(ir),nu=a(ir,"TD",{align:!0});var 
wse=l(nu);iO=o(wse,"\u274C"),wse.forEach(t),dO=s(ir),ou=a(ir,"TD",{align:!0});var Mse=l(ou);sO=o(Mse,"\u2705"),Mse.forEach(t),hO=s(ir),iu=a(ir,"TD",{align:!0});var Pse=l(iu);cO=o(Pse,"\u274C"),Pse.forEach(t),gO=s(ir),du=a(ir,"TD",{align:!0});var Gse=l(du);fO=o(Gse,"\u274C"),Gse.forEach(t),ir.forEach(t),uO=s(f),se=a(f,"TR",{});var dr=l(se);su=a(dr,"TD",{align:!0});var Nse=l(su);vO=o(Nse,"LXMERT"),Nse.forEach(t),EO=s(dr),hu=a(dr,"TD",{align:!0});var kse=l(hu);TO=o(kse,"\u2705"),kse.forEach(t),_O=s(dr),cu=a(dr,"TD",{align:!0});var Cse=l(cu);mO=o(Cse,"\u2705"),Cse.forEach(t),pO=s(dr),gu=a(dr,"TD",{align:!0});var Ise=l(gu);DO=o(Ise,"\u2705"),Ise.forEach(t),AO=s(dr),fu=a(dr,"TD",{align:!0});var Bse=l(fu);RO=o(Bse,"\u2705"),Bse.forEach(t),yO=s(dr),uu=a(dr,"TD",{align:!0});var xse=l(uu);bO=o(xse,"\u274C"),xse.forEach(t),dr.forEach(t),LO=s(f),he=a(f,"TR",{});var sr=l(he);vu=a(sr,"TD",{align:!0});var Ose=l(vu);SO=o(Ose,"M2M100"),Ose.forEach(t),wO=s(sr),Eu=a(sr,"TD",{align:!0});var Fse=l(Eu);MO=o(Fse,"\u2705"),Fse.forEach(t),PO=s(sr),Tu=a(sr,"TD",{align:!0});var Wse=l(Tu);GO=o(Wse,"\u274C"),Wse.forEach(t),NO=s(sr),_u=a(sr,"TD",{align:!0});var Jse=l(_u);kO=o(Jse,"\u2705"),Jse.forEach(t),CO=s(sr),mu=a(sr,"TD",{align:!0});var Yse=l(mu);IO=o(Yse,"\u274C"),Yse.forEach(t),BO=s(sr),pu=a(sr,"TD",{align:!0});var Kse=l(pu);xO=o(Kse,"\u274C"),Kse.forEach(t),sr.forEach(t),OO=s(f),ce=a(f,"TR",{});var hr=l(ce);Du=a(hr,"TD",{align:!0});var Vse=l(Du);FO=o(Vse,"Marian"),Vse.forEach(t),WO=s(hr),Au=a(hr,"TD",{align:!0});var Zse=l(Au);JO=o(Zse,"\u2705"),Zse.forEach(t),YO=s(hr),Ru=a(hr,"TD",{align:!0});var Xse=l(Ru);KO=o(Xse,"\u274C"),Xse.forEach(t),VO=s(hr),yu=a(hr,"TD",{align:!0});var zse=l(yu);ZO=o(zse,"\u2705"),zse.forEach(t),XO=s(hr),bu=a(hr,"TD",{align:!0});var Hse=l(bu);zO=o(Hse,"\u2705"),Hse.forEach(t),HO=s(hr),Lu=a(hr,"TD",{align:!0});var Use=l(Lu);UO=o(Use,"\u2705"),Use.forEach(t),hr.forEach(t),qO=s(f),ge=a(f,"TR",{});var cr=l(ge);Su=a(cr,"TD",{align:!0});var 
qse=l(Su);jO=o(qse,"MaskFormer"),qse.forEach(t),QO=s(cr),wu=a(cr,"TD",{align:!0});var jse=l(wu);$O=o(jse,"\u274C"),jse.forEach(t),eF=s(cr),Mu=a(cr,"TD",{align:!0});var Qse=l(Mu);tF=o(Qse,"\u274C"),Qse.forEach(t),rF=s(cr),Pu=a(cr,"TD",{align:!0});var $se=l(Pu);aF=o($se,"\u2705"),$se.forEach(t),lF=s(cr),Gu=a(cr,"TD",{align:!0});var ehe=l(Gu);nF=o(ehe,"\u274C"),ehe.forEach(t),oF=s(cr),Nu=a(cr,"TD",{align:!0});var the=l(Nu);iF=o(the,"\u274C"),the.forEach(t),cr.forEach(t),dF=s(f),fe=a(f,"TR",{});var gr=l(fe);ku=a(gr,"TD",{align:!0});var rhe=l(ku);sF=o(rhe,"mBART"),rhe.forEach(t),hF=s(gr),Cu=a(gr,"TD",{align:!0});var ahe=l(Cu);cF=o(ahe,"\u2705"),ahe.forEach(t),gF=s(gr),Iu=a(gr,"TD",{align:!0});var lhe=l(Iu);fF=o(lhe,"\u2705"),lhe.forEach(t),uF=s(gr),Bu=a(gr,"TD",{align:!0});var nhe=l(Bu);vF=o(nhe,"\u2705"),nhe.forEach(t),EF=s(gr),xu=a(gr,"TD",{align:!0});var ohe=l(xu);TF=o(ohe,"\u2705"),ohe.forEach(t),_F=s(gr),Ou=a(gr,"TD",{align:!0});var ihe=l(Ou);mF=o(ihe,"\u2705"),ihe.forEach(t),gr.forEach(t),pF=s(f),ue=a(f,"TR",{});var fr=l(ue);Fu=a(fr,"TD",{align:!0});var dhe=l(Fu);DF=o(dhe,"MegatronBert"),dhe.forEach(t),AF=s(fr),Wu=a(fr,"TD",{align:!0});var she=l(Wu);RF=o(she,"\u274C"),she.forEach(t),yF=s(fr),Ju=a(fr,"TD",{align:!0});var hhe=l(Ju);bF=o(hhe,"\u274C"),hhe.forEach(t),LF=s(fr),Yu=a(fr,"TD",{align:!0});var che=l(Yu);SF=o(che,"\u2705"),che.forEach(t),wF=s(fr),Ku=a(fr,"TD",{align:!0});var ghe=l(Ku);MF=o(ghe,"\u274C"),ghe.forEach(t),PF=s(fr),Vu=a(fr,"TD",{align:!0});var fhe=l(Vu);GF=o(fhe,"\u274C"),fhe.forEach(t),fr.forEach(t),NF=s(f),ve=a(f,"TR",{});var ur=l(ve);Zu=a(ur,"TD",{align:!0});var uhe=l(Zu);kF=o(uhe,"MobileBERT"),uhe.forEach(t),CF=s(ur),Xu=a(ur,"TD",{align:!0});var vhe=l(Xu);IF=o(vhe,"\u2705"),vhe.forEach(t),BF=s(ur),zu=a(ur,"TD",{align:!0});var Ehe=l(zu);xF=o(Ehe,"\u2705"),Ehe.forEach(t),OF=s(ur),Hu=a(ur,"TD",{align:!0});var The=l(Hu);FF=o(The,"\u2705"),The.forEach(t),WF=s(ur),Uu=a(ur,"TD",{align:!0});var 
_he=l(Uu);JF=o(_he,"\u2705"),_he.forEach(t),YF=s(ur),qu=a(ur,"TD",{align:!0});var mhe=l(qu);KF=o(mhe,"\u274C"),mhe.forEach(t),ur.forEach(t),VF=s(f),Ee=a(f,"TR",{});var vr=l(Ee);ju=a(vr,"TD",{align:!0});var phe=l(ju);ZF=o(phe,"MPNet"),phe.forEach(t),XF=s(vr),Qu=a(vr,"TD",{align:!0});var Dhe=l(Qu);zF=o(Dhe,"\u2705"),Dhe.forEach(t),HF=s(vr),$u=a(vr,"TD",{align:!0});var Ahe=l($u);UF=o(Ahe,"\u2705"),Ahe.forEach(t),qF=s(vr),ev=a(vr,"TD",{align:!0});var Rhe=l(ev);jF=o(Rhe,"\u2705"),Rhe.forEach(t),QF=s(vr),tv=a(vr,"TD",{align:!0});var yhe=l(tv);$F=o(yhe,"\u2705"),yhe.forEach(t),eW=s(vr),rv=a(vr,"TD",{align:!0});var bhe=l(rv);tW=o(bhe,"\u274C"),bhe.forEach(t),vr.forEach(t),rW=s(f),Te=a(f,"TR",{});var Er=l(Te);av=a(Er,"TD",{align:!0});var Lhe=l(av);aW=o(Lhe,"mT5"),Lhe.forEach(t),lW=s(Er),lv=a(Er,"TD",{align:!0});var She=l(lv);nW=o(She,"\u2705"),She.forEach(t),oW=s(Er),nv=a(Er,"TD",{align:!0});var whe=l(nv);iW=o(whe,"\u2705"),whe.forEach(t),dW=s(Er),ov=a(Er,"TD",{align:!0});var Mhe=l(ov);sW=o(Mhe,"\u2705"),Mhe.forEach(t),hW=s(Er),iv=a(Er,"TD",{align:!0});var Phe=l(iv);cW=o(Phe,"\u2705"),Phe.forEach(t),gW=s(Er),dv=a(Er,"TD",{align:!0});var Ghe=l(dv);fW=o(Ghe,"\u2705"),Ghe.forEach(t),Er.forEach(t),uW=s(f),_e=a(f,"TR",{});var Tr=l(_e);sv=a(Tr,"TD",{align:!0});var Nhe=l(sv);vW=o(Nhe,"Nystromformer"),Nhe.forEach(t),EW=s(Tr),hv=a(Tr,"TD",{align:!0});var khe=l(hv);TW=o(khe,"\u274C"),khe.forEach(t),_W=s(Tr),cv=a(Tr,"TD",{align:!0});var Che=l(cv);mW=o(Che,"\u274C"),Che.forEach(t),pW=s(Tr),gv=a(Tr,"TD",{align:!0});var Ihe=l(gv);DW=o(Ihe,"\u2705"),Ihe.forEach(t),AW=s(Tr),fv=a(Tr,"TD",{align:!0});var Bhe=l(fv);RW=o(Bhe,"\u274C"),Bhe.forEach(t),yW=s(Tr),uv=a(Tr,"TD",{align:!0});var xhe=l(uv);bW=o(xhe,"\u274C"),xhe.forEach(t),Tr.forEach(t),LW=s(f),me=a(f,"TR",{});var _r=l(me);vv=a(_r,"TD",{align:!0});var Ohe=l(vv);SW=o(Ohe,"OpenAI GPT"),Ohe.forEach(t),wW=s(_r),Ev=a(_r,"TD",{align:!0});var Fhe=l(Ev);MW=o(Fhe,"\u2705"),Fhe.forEach(t),PW=s(_r),Tv=a(_r,"TD",{align:!0});var 
Whe=l(Tv);GW=o(Whe,"\u2705"),Whe.forEach(t),NW=s(_r),_v=a(_r,"TD",{align:!0});var Jhe=l(_v);kW=o(Jhe,"\u2705"),Jhe.forEach(t),CW=s(_r),mv=a(_r,"TD",{align:!0});var Yhe=l(mv);IW=o(Yhe,"\u2705"),Yhe.forEach(t),BW=s(_r),pv=a(_r,"TD",{align:!0});var Khe=l(pv);xW=o(Khe,"\u274C"),Khe.forEach(t),_r.forEach(t),OW=s(f),pe=a(f,"TR",{});var mr=l(pe);Dv=a(mr,"TD",{align:!0});var Vhe=l(Dv);FW=o(Vhe,"OpenAI GPT-2"),Vhe.forEach(t),WW=s(mr),Av=a(mr,"TD",{align:!0});var Zhe=l(Av);JW=o(Zhe,"\u2705"),Zhe.forEach(t),YW=s(mr),Rv=a(mr,"TD",{align:!0});var Xhe=l(Rv);KW=o(Xhe,"\u2705"),Xhe.forEach(t),VW=s(mr),yv=a(mr,"TD",{align:!0});var zhe=l(yv);ZW=o(zhe,"\u2705"),zhe.forEach(t),XW=s(mr),bv=a(mr,"TD",{align:!0});var Hhe=l(bv);zW=o(Hhe,"\u2705"),Hhe.forEach(t),HW=s(mr),Lv=a(mr,"TD",{align:!0});var Uhe=l(Lv);UW=o(Uhe,"\u2705"),Uhe.forEach(t),mr.forEach(t),qW=s(f),De=a(f,"TR",{});var pr=l(De);Sv=a(pr,"TD",{align:!0});var qhe=l(Sv);jW=o(qhe,"Pegasus"),qhe.forEach(t),QW=s(pr),wv=a(pr,"TD",{align:!0});var jhe=l(wv);$W=o(jhe,"\u2705"),jhe.forEach(t),eJ=s(pr),Mv=a(pr,"TD",{align:!0});var Qhe=l(Mv);tJ=o(Qhe,"\u2705"),Qhe.forEach(t),rJ=s(pr),Pv=a(pr,"TD",{align:!0});var $he=l(Pv);aJ=o($he,"\u2705"),$he.forEach(t),lJ=s(pr),Gv=a(pr,"TD",{align:!0});var ece=l(Gv);nJ=o(ece,"\u2705"),ece.forEach(t),oJ=s(pr),Nv=a(pr,"TD",{align:!0});var tce=l(Nv);iJ=o(tce,"\u2705"),tce.forEach(t),pr.forEach(t),dJ=s(f),Ae=a(f,"TR",{});var Dr=l(Ae);kv=a(Dr,"TD",{align:!0});var rce=l(kv);sJ=o(rce,"Perceiver"),rce.forEach(t),hJ=s(Dr),Cv=a(Dr,"TD",{align:!0});var ace=l(Cv);cJ=o(ace,"\u2705"),ace.forEach(t),gJ=s(Dr),Iv=a(Dr,"TD",{align:!0});var lce=l(Iv);fJ=o(lce,"\u274C"),lce.forEach(t),uJ=s(Dr),Bv=a(Dr,"TD",{align:!0});var nce=l(Bv);vJ=o(nce,"\u2705"),nce.forEach(t),EJ=s(Dr),xv=a(Dr,"TD",{align:!0});var oce=l(xv);TJ=o(oce,"\u274C"),oce.forEach(t),_J=s(Dr),Ov=a(Dr,"TD",{align:!0});var ice=l(Ov);mJ=o(ice,"\u274C"),ice.forEach(t),Dr.forEach(t),pJ=s(f),Re=a(f,"TR",{});var Ar=l(Re);Fv=a(Ar,"TD",{align:!0});var 
dce=l(Fv);DJ=o(dce,"PLBart"),dce.forEach(t),AJ=s(Ar),Wv=a(Ar,"TD",{align:!0});var sce=l(Wv);RJ=o(sce,"\u2705"),sce.forEach(t),yJ=s(Ar),Jv=a(Ar,"TD",{align:!0});var hce=l(Jv);bJ=o(hce,"\u274C"),hce.forEach(t),LJ=s(Ar),Yv=a(Ar,"TD",{align:!0});var cce=l(Yv);SJ=o(cce,"\u2705"),cce.forEach(t),wJ=s(Ar),Kv=a(Ar,"TD",{align:!0});var gce=l(Kv);MJ=o(gce,"\u274C"),gce.forEach(t),PJ=s(Ar),Vv=a(Ar,"TD",{align:!0});var fce=l(Vv);GJ=o(fce,"\u274C"),fce.forEach(t),Ar.forEach(t),NJ=s(f),ye=a(f,"TR",{});var Rr=l(ye);Zv=a(Rr,"TD",{align:!0});var uce=l(Zv);kJ=o(uce,"PoolFormer"),uce.forEach(t),CJ=s(Rr),Xv=a(Rr,"TD",{align:!0});var vce=l(Xv);IJ=o(vce,"\u274C"),vce.forEach(t),BJ=s(Rr),zv=a(Rr,"TD",{align:!0});var Ece=l(zv);xJ=o(Ece,"\u274C"),Ece.forEach(t),OJ=s(Rr),Hv=a(Rr,"TD",{align:!0});var Tce=l(Hv);FJ=o(Tce,"\u2705"),Tce.forEach(t),WJ=s(Rr),Uv=a(Rr,"TD",{align:!0});var _ce=l(Uv);JJ=o(_ce,"\u274C"),_ce.forEach(t),YJ=s(Rr),qv=a(Rr,"TD",{align:!0});var mce=l(qv);KJ=o(mce,"\u274C"),mce.forEach(t),Rr.forEach(t),VJ=s(f),be=a(f,"TR",{});var yr=l(be);jv=a(yr,"TD",{align:!0});var pce=l(jv);ZJ=o(pce,"ProphetNet"),pce.forEach(t),XJ=s(yr),Qv=a(yr,"TD",{align:!0});var Dce=l(Qv);zJ=o(Dce,"\u2705"),Dce.forEach(t),HJ=s(yr),$v=a(yr,"TD",{align:!0});var Ace=l($v);UJ=o(Ace,"\u274C"),Ace.forEach(t),qJ=s(yr),eE=a(yr,"TD",{align:!0});var Rce=l(eE);jJ=o(Rce,"\u2705"),Rce.forEach(t),QJ=s(yr),tE=a(yr,"TD",{align:!0});var yce=l(tE);$J=o(yce,"\u274C"),yce.forEach(t),eY=s(yr),rE=a(yr,"TD",{align:!0});var bce=l(rE);tY=o(bce,"\u274C"),bce.forEach(t),yr.forEach(t),rY=s(f),Le=a(f,"TR",{});var br=l(Le);aE=a(br,"TD",{align:!0});var Lce=l(aE);aY=o(Lce,"QDQBert"),Lce.forEach(t),lY=s(br),lE=a(br,"TD",{align:!0});var Sce=l(lE);nY=o(Sce,"\u274C"),Sce.forEach(t),oY=s(br),nE=a(br,"TD",{align:!0});var wce=l(nE);iY=o(wce,"\u274C"),wce.forEach(t),dY=s(br),oE=a(br,"TD",{align:!0});var Mce=l(oE);sY=o(Mce,"\u2705"),Mce.forEach(t),hY=s(br),iE=a(br,"TD",{align:!0});var 
Pce=l(iE);cY=o(Pce,"\u274C"),Pce.forEach(t),gY=s(br),dE=a(br,"TD",{align:!0});var Gce=l(dE);fY=o(Gce,"\u274C"),Gce.forEach(t),br.forEach(t),uY=s(f),Se=a(f,"TR",{});var Lr=l(Se);sE=a(Lr,"TD",{align:!0});var Nce=l(sE);vY=o(Nce,"RAG"),Nce.forEach(t),EY=s(Lr),hE=a(Lr,"TD",{align:!0});var kce=l(hE);TY=o(kce,"\u2705"),kce.forEach(t),_Y=s(Lr),cE=a(Lr,"TD",{align:!0});var Cce=l(cE);mY=o(Cce,"\u274C"),Cce.forEach(t),pY=s(Lr),gE=a(Lr,"TD",{align:!0});var Ice=l(gE);DY=o(Ice,"\u2705"),Ice.forEach(t),AY=s(Lr),fE=a(Lr,"TD",{align:!0});var Bce=l(fE);RY=o(Bce,"\u2705"),Bce.forEach(t),yY=s(Lr),uE=a(Lr,"TD",{align:!0});var xce=l(uE);bY=o(xce,"\u274C"),xce.forEach(t),Lr.forEach(t),LY=s(f),we=a(f,"TR",{});var Sr=l(we);vE=a(Sr,"TD",{align:!0});var Oce=l(vE);SY=o(Oce,"Realm"),Oce.forEach(t),wY=s(Sr),EE=a(Sr,"TD",{align:!0});var Fce=l(EE);MY=o(Fce,"\u2705"),Fce.forEach(t),PY=s(Sr),TE=a(Sr,"TD",{align:!0});var Wce=l(TE);GY=o(Wce,"\u2705"),Wce.forEach(t),NY=s(Sr),_E=a(Sr,"TD",{align:!0});var Jce=l(_E);kY=o(Jce,"\u2705"),Jce.forEach(t),CY=s(Sr),mE=a(Sr,"TD",{align:!0});var Yce=l(mE);IY=o(Yce,"\u274C"),Yce.forEach(t),BY=s(Sr),pE=a(Sr,"TD",{align:!0});var Kce=l(pE);xY=o(Kce,"\u274C"),Kce.forEach(t),Sr.forEach(t),OY=s(f),Me=a(f,"TR",{});var wr=l(Me);DE=a(wr,"TD",{align:!0});var Vce=l(DE);FY=o(Vce,"Reformer"),Vce.forEach(t),WY=s(wr),AE=a(wr,"TD",{align:!0});var Zce=l(AE);JY=o(Zce,"\u2705"),Zce.forEach(t),YY=s(wr),RE=a(wr,"TD",{align:!0});var Xce=l(RE);KY=o(Xce,"\u2705"),Xce.forEach(t),VY=s(wr),yE=a(wr,"TD",{align:!0});var zce=l(yE);ZY=o(zce,"\u2705"),zce.forEach(t),XY=s(wr),bE=a(wr,"TD",{align:!0});var Hce=l(bE);zY=o(Hce,"\u274C"),Hce.forEach(t),HY=s(wr),LE=a(wr,"TD",{align:!0});var Uce=l(LE);UY=o(Uce,"\u274C"),Uce.forEach(t),wr.forEach(t),qY=s(f),Pe=a(f,"TR",{});var Mr=l(Pe);SE=a(Mr,"TD",{align:!0});var qce=l(SE);jY=o(qce,"RegNet"),qce.forEach(t),QY=s(Mr),wE=a(Mr,"TD",{align:!0});var jce=l(wE);$Y=o(jce,"\u274C"),jce.forEach(t),eK=s(Mr),ME=a(Mr,"TD",{align:!0});var 
Qce=l(ME);tK=o(Qce,"\u274C"),Qce.forEach(t),rK=s(Mr),PE=a(Mr,"TD",{align:!0});var $ce=l(PE);aK=o($ce,"\u2705"),$ce.forEach(t),lK=s(Mr),GE=a(Mr,"TD",{align:!0});var ege=l(GE);nK=o(ege,"\u274C"),ege.forEach(t),oK=s(Mr),NE=a(Mr,"TD",{align:!0});var tge=l(NE);iK=o(tge,"\u274C"),tge.forEach(t),Mr.forEach(t),dK=s(f),Ge=a(f,"TR",{});var Pr=l(Ge);kE=a(Pr,"TD",{align:!0});var rge=l(kE);sK=o(rge,"RemBERT"),rge.forEach(t),hK=s(Pr),CE=a(Pr,"TD",{align:!0});var age=l(CE);cK=o(age,"\u2705"),age.forEach(t),gK=s(Pr),IE=a(Pr,"TD",{align:!0});var lge=l(IE);fK=o(lge,"\u2705"),lge.forEach(t),uK=s(Pr),BE=a(Pr,"TD",{align:!0});var nge=l(BE);vK=o(nge,"\u2705"),nge.forEach(t),EK=s(Pr),xE=a(Pr,"TD",{align:!0});var oge=l(xE);TK=o(oge,"\u2705"),oge.forEach(t),_K=s(Pr),OE=a(Pr,"TD",{align:!0});var ige=l(OE);mK=o(ige,"\u274C"),ige.forEach(t),Pr.forEach(t),pK=s(f),Ne=a(f,"TR",{});var Gr=l(Ne);FE=a(Gr,"TD",{align:!0});var dge=l(FE);DK=o(dge,"ResNet"),dge.forEach(t),AK=s(Gr),WE=a(Gr,"TD",{align:!0});var sge=l(WE);RK=o(sge,"\u274C"),sge.forEach(t),yK=s(Gr),JE=a(Gr,"TD",{align:!0});var hge=l(JE);bK=o(hge,"\u274C"),hge.forEach(t),LK=s(Gr),YE=a(Gr,"TD",{align:!0});var cge=l(YE);SK=o(cge,"\u2705"),cge.forEach(t),wK=s(Gr),KE=a(Gr,"TD",{align:!0});var gge=l(KE);MK=o(gge,"\u274C"),gge.forEach(t),PK=s(Gr),VE=a(Gr,"TD",{align:!0});var fge=l(VE);GK=o(fge,"\u274C"),fge.forEach(t),Gr.forEach(t),NK=s(f),ke=a(f,"TR",{});var Nr=l(ke);ZE=a(Nr,"TD",{align:!0});var uge=l(ZE);kK=o(uge,"RetriBERT"),uge.forEach(t),CK=s(Nr),XE=a(Nr,"TD",{align:!0});var vge=l(XE);IK=o(vge,"\u2705"),vge.forEach(t),BK=s(Nr),zE=a(Nr,"TD",{align:!0});var Ege=l(zE);xK=o(Ege,"\u2705"),Ege.forEach(t),OK=s(Nr),HE=a(Nr,"TD",{align:!0});var Tge=l(HE);FK=o(Tge,"\u2705"),Tge.forEach(t),WK=s(Nr),UE=a(Nr,"TD",{align:!0});var _ge=l(UE);JK=o(_ge,"\u274C"),_ge.forEach(t),YK=s(Nr),qE=a(Nr,"TD",{align:!0});var mge=l(qE);KK=o(mge,"\u274C"),mge.forEach(t),Nr.forEach(t),VK=s(f),Ce=a(f,"TR",{});var kr=l(Ce);jE=a(kr,"TD",{align:!0});var 
pge=l(jE);ZK=o(pge,"RoBERTa"),pge.forEach(t),XK=s(kr),QE=a(kr,"TD",{align:!0});var Dge=l(QE);zK=o(Dge,"\u2705"),Dge.forEach(t),HK=s(kr),$E=a(kr,"TD",{align:!0});var Age=l($E);UK=o(Age,"\u2705"),Age.forEach(t),qK=s(kr),e2=a(kr,"TD",{align:!0});var Rge=l(e2);jK=o(Rge,"\u2705"),Rge.forEach(t),QK=s(kr),t2=a(kr,"TD",{align:!0});var yge=l(t2);$K=o(yge,"\u2705"),yge.forEach(t),eV=s(kr),r2=a(kr,"TD",{align:!0});var bge=l(r2);tV=o(bge,"\u2705"),bge.forEach(t),kr.forEach(t),rV=s(f),Ie=a(f,"TR",{});var Cr=l(Ie);a2=a(Cr,"TD",{align:!0});var Lge=l(a2);aV=o(Lge,"RoFormer"),Lge.forEach(t),lV=s(Cr),l2=a(Cr,"TD",{align:!0});var Sge=l(l2);nV=o(Sge,"\u2705"),Sge.forEach(t),oV=s(Cr),n2=a(Cr,"TD",{align:!0});var wge=l(n2);iV=o(wge,"\u2705"),wge.forEach(t),dV=s(Cr),o2=a(Cr,"TD",{align:!0});var Mge=l(o2);sV=o(Mge,"\u2705"),Mge.forEach(t),hV=s(Cr),i2=a(Cr,"TD",{align:!0});var Pge=l(i2);cV=o(Pge,"\u2705"),Pge.forEach(t),gV=s(Cr),d2=a(Cr,"TD",{align:!0});var Gge=l(d2);fV=o(Gge,"\u2705"),Gge.forEach(t),Cr.forEach(t),uV=s(f),Be=a(f,"TR",{});var Ir=l(Be);s2=a(Ir,"TD",{align:!0});var Nge=l(s2);vV=o(Nge,"SegFormer"),Nge.forEach(t),EV=s(Ir),h2=a(Ir,"TD",{align:!0});var kge=l(h2);TV=o(kge,"\u274C"),kge.forEach(t),_V=s(Ir),c2=a(Ir,"TD",{align:!0});var Cge=l(c2);mV=o(Cge,"\u274C"),Cge.forEach(t),pV=s(Ir),g2=a(Ir,"TD",{align:!0});var Ige=l(g2);DV=o(Ige,"\u2705"),Ige.forEach(t),AV=s(Ir),f2=a(Ir,"TD",{align:!0});var Bge=l(f2);RV=o(Bge,"\u274C"),Bge.forEach(t),yV=s(Ir),u2=a(Ir,"TD",{align:!0});var xge=l(u2);bV=o(xge,"\u274C"),xge.forEach(t),Ir.forEach(t),LV=s(f),xe=a(f,"TR",{});var Br=l(xe);v2=a(Br,"TD",{align:!0});var Oge=l(v2);SV=o(Oge,"SEW"),Oge.forEach(t),wV=s(Br),E2=a(Br,"TD",{align:!0});var Fge=l(E2);MV=o(Fge,"\u274C"),Fge.forEach(t),PV=s(Br),T2=a(Br,"TD",{align:!0});var Wge=l(T2);GV=o(Wge,"\u274C"),Wge.forEach(t),NV=s(Br),_2=a(Br,"TD",{align:!0});var Jge=l(_2);kV=o(Jge,"\u2705"),Jge.forEach(t),CV=s(Br),m2=a(Br,"TD",{align:!0});var 
Yge=l(m2);IV=o(Yge,"\u274C"),Yge.forEach(t),BV=s(Br),p2=a(Br,"TD",{align:!0});var Kge=l(p2);xV=o(Kge,"\u274C"),Kge.forEach(t),Br.forEach(t),OV=s(f),Oe=a(f,"TR",{});var xr=l(Oe);D2=a(xr,"TD",{align:!0});var Vge=l(D2);FV=o(Vge,"SEW-D"),Vge.forEach(t),WV=s(xr),A2=a(xr,"TD",{align:!0});var Zge=l(A2);JV=o(Zge,"\u274C"),Zge.forEach(t),YV=s(xr),R2=a(xr,"TD",{align:!0});var Xge=l(R2);KV=o(Xge,"\u274C"),Xge.forEach(t),VV=s(xr),y2=a(xr,"TD",{align:!0});var zge=l(y2);ZV=o(zge,"\u2705"),zge.forEach(t),XV=s(xr),b2=a(xr,"TD",{align:!0});var Hge=l(b2);zV=o(Hge,"\u274C"),Hge.forEach(t),HV=s(xr),L2=a(xr,"TD",{align:!0});var Uge=l(L2);UV=o(Uge,"\u274C"),Uge.forEach(t),xr.forEach(t),qV=s(f),Fe=a(f,"TR",{});var Or=l(Fe);S2=a(Or,"TD",{align:!0});var qge=l(S2);jV=o(qge,"Speech Encoder decoder"),qge.forEach(t),QV=s(Or),w2=a(Or,"TD",{align:!0});var jge=l(w2);$V=o(jge,"\u274C"),jge.forEach(t),eZ=s(Or),M2=a(Or,"TD",{align:!0});var Qge=l(M2);tZ=o(Qge,"\u274C"),Qge.forEach(t),rZ=s(Or),P2=a(Or,"TD",{align:!0});var $ge=l(P2);aZ=o($ge,"\u2705"),$ge.forEach(t),lZ=s(Or),G2=a(Or,"TD",{align:!0});var e1e=l(G2);nZ=o(e1e,"\u274C"),e1e.forEach(t),oZ=s(Or),N2=a(Or,"TD",{align:!0});var t1e=l(N2);iZ=o(t1e,"\u2705"),t1e.forEach(t),Or.forEach(t),dZ=s(f),We=a(f,"TR",{});var Fr=l(We);k2=a(Fr,"TD",{align:!0});var r1e=l(k2);sZ=o(r1e,"Speech2Text"),r1e.forEach(t),hZ=s(Fr),C2=a(Fr,"TD",{align:!0});var a1e=l(C2);cZ=o(a1e,"\u2705"),a1e.forEach(t),gZ=s(Fr),I2=a(Fr,"TD",{align:!0});var l1e=l(I2);fZ=o(l1e,"\u274C"),l1e.forEach(t),uZ=s(Fr),B2=a(Fr,"TD",{align:!0});var n1e=l(B2);vZ=o(n1e,"\u2705"),n1e.forEach(t),EZ=s(Fr),x2=a(Fr,"TD",{align:!0});var o1e=l(x2);TZ=o(o1e,"\u2705"),o1e.forEach(t),_Z=s(Fr),O2=a(Fr,"TD",{align:!0});var i1e=l(O2);mZ=o(i1e,"\u274C"),i1e.forEach(t),Fr.forEach(t),pZ=s(f),Je=a(f,"TR",{});var Wr=l(Je);F2=a(Wr,"TD",{align:!0});var d1e=l(F2);DZ=o(d1e,"Speech2Text2"),d1e.forEach(t),AZ=s(Wr),W2=a(Wr,"TD",{align:!0});var s1e=l(W2);RZ=o(s1e,"\u2705"),s1e.forEach(t),yZ=s(Wr),J2=a(Wr,"TD",{align:!0});var 
h1e=l(J2);bZ=o(h1e,"\u274C"),h1e.forEach(t),LZ=s(Wr),Y2=a(Wr,"TD",{align:!0});var c1e=l(Y2);SZ=o(c1e,"\u274C"),c1e.forEach(t),wZ=s(Wr),K2=a(Wr,"TD",{align:!0});var g1e=l(K2);MZ=o(g1e,"\u274C"),g1e.forEach(t),PZ=s(Wr),V2=a(Wr,"TD",{align:!0});var f1e=l(V2);GZ=o(f1e,"\u274C"),f1e.forEach(t),Wr.forEach(t),NZ=s(f),Ye=a(f,"TR",{});var Jr=l(Ye);Z2=a(Jr,"TD",{align:!0});var u1e=l(Z2);kZ=o(u1e,"Splinter"),u1e.forEach(t),CZ=s(Jr),X2=a(Jr,"TD",{align:!0});var v1e=l(X2);IZ=o(v1e,"\u2705"),v1e.forEach(t),BZ=s(Jr),z2=a(Jr,"TD",{align:!0});var E1e=l(z2);xZ=o(E1e,"\u2705"),E1e.forEach(t),OZ=s(Jr),H2=a(Jr,"TD",{align:!0});var T1e=l(H2);FZ=o(T1e,"\u2705"),T1e.forEach(t),WZ=s(Jr),U2=a(Jr,"TD",{align:!0});var _1e=l(U2);JZ=o(_1e,"\u274C"),_1e.forEach(t),YZ=s(Jr),q2=a(Jr,"TD",{align:!0});var m1e=l(q2);KZ=o(m1e,"\u274C"),m1e.forEach(t),Jr.forEach(t),VZ=s(f),Ke=a(f,"TR",{});var Yr=l(Ke);j2=a(Yr,"TD",{align:!0});var p1e=l(j2);ZZ=o(p1e,"SqueezeBERT"),p1e.forEach(t),XZ=s(Yr),Q2=a(Yr,"TD",{align:!0});var D1e=l(Q2);zZ=o(D1e,"\u2705"),D1e.forEach(t),HZ=s(Yr),$2=a(Yr,"TD",{align:!0});var A1e=l($2);UZ=o(A1e,"\u2705"),A1e.forEach(t),qZ=s(Yr),eT=a(Yr,"TD",{align:!0});var R1e=l(eT);jZ=o(R1e,"\u2705"),R1e.forEach(t),QZ=s(Yr),tT=a(Yr,"TD",{align:!0});var y1e=l(tT);$Z=o(y1e,"\u274C"),y1e.forEach(t),eX=s(Yr),rT=a(Yr,"TD",{align:!0});var b1e=l(rT);tX=o(b1e,"\u274C"),b1e.forEach(t),Yr.forEach(t),rX=s(f),Ve=a(f,"TR",{});var Kr=l(Ve);aT=a(Kr,"TD",{align:!0});var L1e=l(aT);aX=o(L1e,"Swin"),L1e.forEach(t),lX=s(Kr),lT=a(Kr,"TD",{align:!0});var S1e=l(lT);nX=o(S1e,"\u274C"),S1e.forEach(t),oX=s(Kr),nT=a(Kr,"TD",{align:!0});var w1e=l(nT);iX=o(w1e,"\u274C"),w1e.forEach(t),dX=s(Kr),oT=a(Kr,"TD",{align:!0});var M1e=l(oT);sX=o(M1e,"\u2705"),M1e.forEach(t),hX=s(Kr),iT=a(Kr,"TD",{align:!0});var P1e=l(iT);cX=o(P1e,"\u274C"),P1e.forEach(t),gX=s(Kr),dT=a(Kr,"TD",{align:!0});var G1e=l(dT);fX=o(G1e,"\u274C"),G1e.forEach(t),Kr.forEach(t),uX=s(f),Ze=a(f,"TR",{});var Vr=l(Ze);sT=a(Vr,"TD",{align:!0});var 
N1e=l(sT);vX=o(N1e,"T5"),N1e.forEach(t),EX=s(Vr),hT=a(Vr,"TD",{align:!0});var k1e=l(hT);TX=o(k1e,"\u2705"),k1e.forEach(t),_X=s(Vr),cT=a(Vr,"TD",{align:!0});var C1e=l(cT);mX=o(C1e,"\u2705"),C1e.forEach(t),pX=s(Vr),gT=a(Vr,"TD",{align:!0});var I1e=l(gT);DX=o(I1e,"\u2705"),I1e.forEach(t),AX=s(Vr),fT=a(Vr,"TD",{align:!0});var B1e=l(fT);RX=o(B1e,"\u2705"),B1e.forEach(t),yX=s(Vr),uT=a(Vr,"TD",{align:!0});var x1e=l(uT);bX=o(x1e,"\u2705"),x1e.forEach(t),Vr.forEach(t),LX=s(f),Xe=a(f,"TR",{});var Zr=l(Xe);vT=a(Zr,"TD",{align:!0});var O1e=l(vT);SX=o(O1e,"TAPAS"),O1e.forEach(t),wX=s(Zr),ET=a(Zr,"TD",{align:!0});var F1e=l(ET);MX=o(F1e,"\u2705"),F1e.forEach(t),PX=s(Zr),TT=a(Zr,"TD",{align:!0});var W1e=l(TT);GX=o(W1e,"\u274C"),W1e.forEach(t),NX=s(Zr),_T=a(Zr,"TD",{align:!0});var J1e=l(_T);kX=o(J1e,"\u2705"),J1e.forEach(t),CX=s(Zr),mT=a(Zr,"TD",{align:!0});var Y1e=l(mT);IX=o(Y1e,"\u2705"),Y1e.forEach(t),BX=s(Zr),pT=a(Zr,"TD",{align:!0});var K1e=l(pT);xX=o(K1e,"\u274C"),K1e.forEach(t),Zr.forEach(t),OX=s(f),ze=a(f,"TR",{});var Xr=l(ze);DT=a(Xr,"TD",{align:!0});var V1e=l(DT);FX=o(V1e,"TAPEX"),V1e.forEach(t),WX=s(Xr),AT=a(Xr,"TD",{align:!0});var Z1e=l(AT);JX=o(Z1e,"\u2705"),Z1e.forEach(t),YX=s(Xr),RT=a(Xr,"TD",{align:!0});var X1e=l(RT);KX=o(X1e,"\u2705"),X1e.forEach(t),VX=s(Xr),yT=a(Xr,"TD",{align:!0});var z1e=l(yT);ZX=o(z1e,"\u2705"),z1e.forEach(t),XX=s(Xr),bT=a(Xr,"TD",{align:!0});var H1e=l(bT);zX=o(H1e,"\u2705"),H1e.forEach(t),HX=s(Xr),LT=a(Xr,"TD",{align:!0});var U1e=l(LT);UX=o(U1e,"\u2705"),U1e.forEach(t),Xr.forEach(t),qX=s(f),He=a(f,"TR",{});var zr=l(He);ST=a(zr,"TD",{align:!0});var q1e=l(ST);jX=o(q1e,"Transformer-XL"),q1e.forEach(t),QX=s(zr),wT=a(zr,"TD",{align:!0});var j1e=l(wT);$X=o(j1e,"\u2705"),j1e.forEach(t),ez=s(zr),MT=a(zr,"TD",{align:!0});var Q1e=l(MT);tz=o(Q1e,"\u274C"),Q1e.forEach(t),rz=s(zr),PT=a(zr,"TD",{align:!0});var $1e=l(PT);az=o($1e,"\u2705"),$1e.forEach(t),lz=s(zr),GT=a(zr,"TD",{align:!0});var 
efe=l(GT);nz=o(efe,"\u2705"),efe.forEach(t),oz=s(zr),NT=a(zr,"TD",{align:!0});var tfe=l(NT);iz=o(tfe,"\u274C"),tfe.forEach(t),zr.forEach(t),dz=s(f),Ue=a(f,"TR",{});var Hr=l(Ue);kT=a(Hr,"TD",{align:!0});var rfe=l(kT);sz=o(rfe,"TrOCR"),rfe.forEach(t),hz=s(Hr),CT=a(Hr,"TD",{align:!0});var afe=l(CT);cz=o(afe,"\u274C"),afe.forEach(t),gz=s(Hr),IT=a(Hr,"TD",{align:!0});var lfe=l(IT);fz=o(lfe,"\u274C"),lfe.forEach(t),uz=s(Hr),BT=a(Hr,"TD",{align:!0});var nfe=l(BT);vz=o(nfe,"\u2705"),nfe.forEach(t),Ez=s(Hr),xT=a(Hr,"TD",{align:!0});var ofe=l(xT);Tz=o(ofe,"\u274C"),ofe.forEach(t),_z=s(Hr),OT=a(Hr,"TD",{align:!0});var ife=l(OT);mz=o(ife,"\u274C"),ife.forEach(t),Hr.forEach(t),pz=s(f),qe=a(f,"TR",{});var Ur=l(qe);FT=a(Ur,"TD",{align:!0});var dfe=l(FT);Dz=o(dfe,"UniSpeech"),dfe.forEach(t),Az=s(Ur),WT=a(Ur,"TD",{align:!0});var sfe=l(WT);Rz=o(sfe,"\u274C"),sfe.forEach(t),yz=s(Ur),JT=a(Ur,"TD",{align:!0});var hfe=l(JT);bz=o(hfe,"\u274C"),hfe.forEach(t),Lz=s(Ur),YT=a(Ur,"TD",{align:!0});var cfe=l(YT);Sz=o(cfe,"\u2705"),cfe.forEach(t),wz=s(Ur),KT=a(Ur,"TD",{align:!0});var gfe=l(KT);Mz=o(gfe,"\u274C"),gfe.forEach(t),Pz=s(Ur),VT=a(Ur,"TD",{align:!0});var ffe=l(VT);Gz=o(ffe,"\u274C"),ffe.forEach(t),Ur.forEach(t),Nz=s(f),je=a(f,"TR",{});var qr=l(je);ZT=a(qr,"TD",{align:!0});var ufe=l(ZT);kz=o(ufe,"UniSpeechSat"),ufe.forEach(t),Cz=s(qr),XT=a(qr,"TD",{align:!0});var vfe=l(XT);Iz=o(vfe,"\u274C"),vfe.forEach(t),Bz=s(qr),zT=a(qr,"TD",{align:!0});var Efe=l(zT);xz=o(Efe,"\u274C"),Efe.forEach(t),Oz=s(qr),HT=a(qr,"TD",{align:!0});var Tfe=l(HT);Fz=o(Tfe,"\u2705"),Tfe.forEach(t),Wz=s(qr),UT=a(qr,"TD",{align:!0});var _fe=l(UT);Jz=o(_fe,"\u274C"),_fe.forEach(t),Yz=s(qr),qT=a(qr,"TD",{align:!0});var mfe=l(qT);Kz=o(mfe,"\u274C"),mfe.forEach(t),qr.forEach(t),Vz=s(f),Qe=a(f,"TR",{});var jr=l(Qe);jT=a(jr,"TD",{align:!0});var pfe=l(jT);Zz=o(pfe,"VAN"),pfe.forEach(t),Xz=s(jr),QT=a(jr,"TD",{align:!0});var Dfe=l(QT);zz=o(Dfe,"\u274C"),Dfe.forEach(t),Hz=s(jr),$T=a(jr,"TD",{align:!0});var 
Afe=l($T);Uz=o(Afe,"\u274C"),Afe.forEach(t),qz=s(jr),e_=a(jr,"TD",{align:!0});var Rfe=l(e_);jz=o(Rfe,"\u2705"),Rfe.forEach(t),Qz=s(jr),t_=a(jr,"TD",{align:!0});var yfe=l(t_);$z=o(yfe,"\u274C"),yfe.forEach(t),eH=s(jr),r_=a(jr,"TD",{align:!0});var bfe=l(r_);tH=o(bfe,"\u274C"),bfe.forEach(t),jr.forEach(t),rH=s(f),$e=a(f,"TR",{});var Qr=l($e);a_=a(Qr,"TD",{align:!0});var Lfe=l(a_);aH=o(Lfe,"ViLT"),Lfe.forEach(t),lH=s(Qr),l_=a(Qr,"TD",{align:!0});var Sfe=l(l_);nH=o(Sfe,"\u274C"),Sfe.forEach(t),oH=s(Qr),n_=a(Qr,"TD",{align:!0});var wfe=l(n_);iH=o(wfe,"\u274C"),wfe.forEach(t),dH=s(Qr),o_=a(Qr,"TD",{align:!0});var Mfe=l(o_);sH=o(Mfe,"\u2705"),Mfe.forEach(t),hH=s(Qr),i_=a(Qr,"TD",{align:!0});var Pfe=l(i_);cH=o(Pfe,"\u274C"),Pfe.forEach(t),gH=s(Qr),d_=a(Qr,"TD",{align:!0});var Gfe=l(d_);fH=o(Gfe,"\u274C"),Gfe.forEach(t),Qr.forEach(t),uH=s(f),et=a(f,"TR",{});var $r=l(et);s_=a($r,"TD",{align:!0});var Nfe=l(s_);vH=o(Nfe,"Vision Encoder decoder"),Nfe.forEach(t),EH=s($r),h_=a($r,"TD",{align:!0});var kfe=l(h_);TH=o(kfe,"\u274C"),kfe.forEach(t),_H=s($r),c_=a($r,"TD",{align:!0});var Cfe=l(c_);mH=o(Cfe,"\u274C"),Cfe.forEach(t),pH=s($r),g_=a($r,"TD",{align:!0});var Ife=l(g_);DH=o(Ife,"\u2705"),Ife.forEach(t),AH=s($r),f_=a($r,"TD",{align:!0});var Bfe=l(f_);RH=o(Bfe,"\u2705"),Bfe.forEach(t),yH=s($r),u_=a($r,"TD",{align:!0});var xfe=l(u_);bH=o(xfe,"\u2705"),xfe.forEach(t),$r.forEach(t),LH=s(f),tt=a(f,"TR",{});var ea=l(tt);v_=a(ea,"TD",{align:!0});var Ofe=l(v_);SH=o(Ofe,"VisionTextDualEncoder"),Ofe.forEach(t),wH=s(ea),E_=a(ea,"TD",{align:!0});var Ffe=l(E_);MH=o(Ffe,"\u274C"),Ffe.forEach(t),PH=s(ea),T_=a(ea,"TD",{align:!0});var Wfe=l(T_);GH=o(Wfe,"\u274C"),Wfe.forEach(t),NH=s(ea),__=a(ea,"TD",{align:!0});var Jfe=l(__);kH=o(Jfe,"\u2705"),Jfe.forEach(t),CH=s(ea),m_=a(ea,"TD",{align:!0});var Yfe=l(m_);IH=o(Yfe,"\u274C"),Yfe.forEach(t),BH=s(ea),p_=a(ea,"TD",{align:!0});var Kfe=l(p_);xH=o(Kfe,"\u2705"),Kfe.forEach(t),ea.forEach(t),OH=s(f),rt=a(f,"TR",{});var 
ta=l(rt);D_=a(ta,"TD",{align:!0});var Vfe=l(D_);FH=o(Vfe,"VisualBert"),Vfe.forEach(t),WH=s(ta),A_=a(ta,"TD",{align:!0});var Zfe=l(A_);JH=o(Zfe,"\u274C"),Zfe.forEach(t),YH=s(ta),R_=a(ta,"TD",{align:!0});var Xfe=l(R_);KH=o(Xfe,"\u274C"),Xfe.forEach(t),VH=s(ta),y_=a(ta,"TD",{align:!0});var zfe=l(y_);ZH=o(zfe,"\u2705"),zfe.forEach(t),XH=s(ta),b_=a(ta,"TD",{align:!0});var Hfe=l(b_);zH=o(Hfe,"\u274C"),Hfe.forEach(t),HH=s(ta),L_=a(ta,"TD",{align:!0});var Ufe=l(L_);UH=o(Ufe,"\u274C"),Ufe.forEach(t),ta.forEach(t),qH=s(f),at=a(f,"TR",{});var ra=l(at);S_=a(ra,"TD",{align:!0});var qfe=l(S_);jH=o(qfe,"ViT"),qfe.forEach(t),QH=s(ra),w_=a(ra,"TD",{align:!0});var jfe=l(w_);$H=o(jfe,"\u274C"),jfe.forEach(t),eU=s(ra),M_=a(ra,"TD",{align:!0});var Qfe=l(M_);tU=o(Qfe,"\u274C"),Qfe.forEach(t),rU=s(ra),P_=a(ra,"TD",{align:!0});var $fe=l(P_);aU=o($fe,"\u2705"),$fe.forEach(t),lU=s(ra),G_=a(ra,"TD",{align:!0});var eue=l(G_);nU=o(eue,"\u2705"),eue.forEach(t),oU=s(ra),N_=a(ra,"TD",{align:!0});var tue=l(N_);iU=o(tue,"\u2705"),tue.forEach(t),ra.forEach(t),dU=s(f),lt=a(f,"TR",{});var aa=l(lt);k_=a(aa,"TD",{align:!0});var rue=l(k_);sU=o(rue,"ViTMAE"),rue.forEach(t),hU=s(aa),C_=a(aa,"TD",{align:!0});var aue=l(C_);cU=o(aue,"\u274C"),aue.forEach(t),gU=s(aa),I_=a(aa,"TD",{align:!0});var lue=l(I_);fU=o(lue,"\u274C"),lue.forEach(t),uU=s(aa),B_=a(aa,"TD",{align:!0});var nue=l(B_);vU=o(nue,"\u2705"),nue.forEach(t),EU=s(aa),x_=a(aa,"TD",{align:!0});var oue=l(x_);TU=o(oue,"\u2705"),oue.forEach(t),_U=s(aa),O_=a(aa,"TD",{align:!0});var iue=l(O_);mU=o(iue,"\u274C"),iue.forEach(t),aa.forEach(t),pU=s(f),nt=a(f,"TR",{});var la=l(nt);F_=a(la,"TD",{align:!0});var due=l(F_);DU=o(due,"Wav2Vec2"),due.forEach(t),AU=s(la),W_=a(la,"TD",{align:!0});var sue=l(W_);RU=o(sue,"\u2705"),sue.forEach(t),yU=s(la),J_=a(la,"TD",{align:!0});var hue=l(J_);bU=o(hue,"\u274C"),hue.forEach(t),LU=s(la),Y_=a(la,"TD",{align:!0});var cue=l(Y_);SU=o(cue,"\u2705"),cue.forEach(t),wU=s(la),K_=a(la,"TD",{align:!0});var 
gue=l(K_);MU=o(gue,"\u2705"),gue.forEach(t),PU=s(la),V_=a(la,"TD",{align:!0});var fue=l(V_);GU=o(fue,"\u2705"),fue.forEach(t),la.forEach(t),NU=s(f),ot=a(f,"TR",{});var na=l(ot);Z_=a(na,"TD",{align:!0});var uue=l(Z_);kU=o(uue,"WavLM"),uue.forEach(t),CU=s(na),X_=a(na,"TD",{align:!0});var vue=l(X_);IU=o(vue,"\u274C"),vue.forEach(t),BU=s(na),z_=a(na,"TD",{align:!0});var Eue=l(z_);xU=o(Eue,"\u274C"),Eue.forEach(t),OU=s(na),H_=a(na,"TD",{align:!0});var Tue=l(H_);FU=o(Tue,"\u2705"),Tue.forEach(t),WU=s(na),U_=a(na,"TD",{align:!0});var _ue=l(U_);JU=o(_ue,"\u274C"),_ue.forEach(t),YU=s(na),q_=a(na,"TD",{align:!0});var mue=l(q_);KU=o(mue,"\u274C"),mue.forEach(t),na.forEach(t),VU=s(f),it=a(f,"TR",{});var oa=l(it);j_=a(oa,"TD",{align:!0});var pue=l(j_);ZU=o(pue,"XGLM"),pue.forEach(t),XU=s(oa),Q_=a(oa,"TD",{align:!0});var Due=l(Q_);zU=o(Due,"\u2705"),Due.forEach(t),HU=s(oa),$_=a(oa,"TD",{align:!0});var Aue=l($_);UU=o(Aue,"\u2705"),Aue.forEach(t),qU=s(oa),e3=a(oa,"TD",{align:!0});var Rue=l(e3);jU=o(Rue,"\u2705"),Rue.forEach(t),QU=s(oa),t3=a(oa,"TD",{align:!0});var yue=l(t3);$U=o(yue,"\u274C"),yue.forEach(t),eq=s(oa),r3=a(oa,"TD",{align:!0});var bue=l(r3);tq=o(bue,"\u2705"),bue.forEach(t),oa.forEach(t),rq=s(f),dt=a(f,"TR",{});var ia=l(dt);a3=a(ia,"TD",{align:!0});var Lue=l(a3);aq=o(Lue,"XLM"),Lue.forEach(t),lq=s(ia),l3=a(ia,"TD",{align:!0});var Sue=l(l3);nq=o(Sue,"\u2705"),Sue.forEach(t),oq=s(ia),n3=a(ia,"TD",{align:!0});var wue=l(n3);iq=o(wue,"\u274C"),wue.forEach(t),dq=s(ia),o3=a(ia,"TD",{align:!0});var Mue=l(o3);sq=o(Mue,"\u2705"),Mue.forEach(t),hq=s(ia),i3=a(ia,"TD",{align:!0});var Pue=l(i3);cq=o(Pue,"\u2705"),Pue.forEach(t),gq=s(ia),d3=a(ia,"TD",{align:!0});var Gue=l(d3);fq=o(Gue,"\u274C"),Gue.forEach(t),ia.forEach(t),uq=s(f),st=a(f,"TR",{});var da=l(st);s3=a(da,"TD",{align:!0});var Nue=l(s3);vq=o(Nue,"XLM-RoBERTa"),Nue.forEach(t),Eq=s(da),h3=a(da,"TD",{align:!0});var kue=l(h3);Tq=o(kue,"\u2705"),kue.forEach(t),_q=s(da),c3=a(da,"TD",{align:!0});var 
Cue=l(c3);mq=o(Cue,"\u2705"),Cue.forEach(t),pq=s(da),g3=a(da,"TD",{align:!0});var Iue=l(g3);Dq=o(Iue,"\u2705"),Iue.forEach(t),Aq=s(da),f3=a(da,"TD",{align:!0});var Bue=l(f3);Rq=o(Bue,"\u2705"),Bue.forEach(t),yq=s(da),u3=a(da,"TD",{align:!0});var xue=l(u3);bq=o(xue,"\u2705"),xue.forEach(t),da.forEach(t),Lq=s(f),ht=a(f,"TR",{});var sa=l(ht);v3=a(sa,"TD",{align:!0});var Oue=l(v3);Sq=o(Oue,"XLM-RoBERTa-XL"),Oue.forEach(t),wq=s(sa),E3=a(sa,"TD",{align:!0});var Fue=l(E3);Mq=o(Fue,"\u274C"),Fue.forEach(t),Pq=s(sa),T3=a(sa,"TD",{align:!0});var Wue=l(T3);Gq=o(Wue,"\u274C"),Wue.forEach(t),Nq=s(sa),_3=a(sa,"TD",{align:!0});var Jue=l(_3);kq=o(Jue,"\u2705"),Jue.forEach(t),Cq=s(sa),m3=a(sa,"TD",{align:!0});var Yue=l(m3);Iq=o(Yue,"\u274C"),Yue.forEach(t),Bq=s(sa),p3=a(sa,"TD",{align:!0});var Kue=l(p3);xq=o(Kue,"\u274C"),Kue.forEach(t),sa.forEach(t),Oq=s(f),ct=a(f,"TR",{});var ha=l(ct);D3=a(ha,"TD",{align:!0});var Vue=l(D3);Fq=o(Vue,"XLMProphetNet"),Vue.forEach(t),Wq=s(ha),A3=a(ha,"TD",{align:!0});var Zue=l(A3);Jq=o(Zue,"\u2705"),Zue.forEach(t),Yq=s(ha),R3=a(ha,"TD",{align:!0});var Xue=l(R3);Kq=o(Xue,"\u274C"),Xue.forEach(t),Vq=s(ha),y3=a(ha,"TD",{align:!0});var zue=l(y3);Zq=o(zue,"\u2705"),zue.forEach(t),Xq=s(ha),b3=a(ha,"TD",{align:!0});var Hue=l(b3);zq=o(Hue,"\u274C"),Hue.forEach(t),Hq=s(ha),L3=a(ha,"TD",{align:!0});var Uue=l(L3);Uq=o(Uue,"\u274C"),Uue.forEach(t),ha.forEach(t),qq=s(f),gt=a(f,"TR",{});var ca=l(gt);S3=a(ca,"TD",{align:!0});var que=l(S3);jq=o(que,"XLNet"),que.forEach(t),Qq=s(ca),w3=a(ca,"TD",{align:!0});var jue=l(w3);$q=o(jue,"\u2705"),jue.forEach(t),ej=s(ca),M3=a(ca,"TD",{align:!0});var Que=l(M3);tj=o(Que,"\u2705"),Que.forEach(t),rj=s(ca),P3=a(ca,"TD",{align:!0});var $ue=l(P3);aj=o($ue,"\u2705"),$ue.forEach(t),lj=s(ca),G3=a(ca,"TD",{align:!0});var eve=l(G3);nj=o(eve,"\u2705"),eve.forEach(t),oj=s(ca),N3=a(ca,"TD",{align:!0});var tve=l(N3);ij=o(tve,"\u274C"),tve.forEach(t),ca.forEach(t),dj=s(f),ft=a(f,"TR",{});var ga=l(ft);k3=a(ga,"TD",{align:!0});var 
rve=l(k3);sj=o(rve,"YOSO"),rve.forEach(t),hj=s(ga),C3=a(ga,"TD",{align:!0});var ave=l(C3);cj=o(ave,"\u274C"),ave.forEach(t),gj=s(ga),I3=a(ga,"TD",{align:!0});var lve=l(I3);fj=o(lve,"\u274C"),lve.forEach(t),uj=s(ga),B3=a(ga,"TD",{align:!0});var nve=l(B3);vj=o(nve,"\u2705"),nve.forEach(t),Ej=s(ga),x3=a(ga,"TD",{align:!0});var ove=l(x3);Tj=o(ove,"\u274C"),ove.forEach(t),_j=s(ga),O3=a(ga,"TD",{align:!0});var ive=l(O3);mj=o(ive,"\u274C"),ive.forEach(t),ga.forEach(t),f.forEach(t),Up.forEach(t),this.h()},h(){i(Da,"name","hf:doc:metadata"),i(Da,"content",JSON.stringify(Eve)),i(Ma,"id","transformers"),i(Ma,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Ma,"href","#transformers"),i(Aa,"class","relative group"),i(kn,"href","https://pytorch.org/"),i(kn,"rel","nofollow"),i(Cn,"href","https://www.tensorflow.org/"),i(Cn,"rel","nofollow"),i(In,"href","https://jax.readthedocs.io/en/latest/"),i(In,"rel","nofollow"),i(Pa,"id","se-voc-estiver-procurando-suporte-do-time-da-hugging-face-acesse"),i(Pa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Pa,"href","#se-voc-estiver-procurando-suporte-do-time-da-hugging-face-acesse"),i(Ra,"class","relative group"),i(ua,"alt","HuggingFace Expert Acceleration Program"),gve(ua.src,Pj="https://huggingface.co/front/thumbnails/support.png")||i(ua,"src",Pj),qp(ua,"max-width","600px"),qp(ua,"border","1px solid #eee"),qp(ua,"border-radius","4px"),qp(ua,"box-shadow","0 1px 2px 0 rgba(0, 0, 0, 0.05)"),i(ya,"target","_blank"),i(ya,"href","https://huggingface.co/support"),i(Ga,"id","contedo"),i(Ga,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),i(Ga,"href","#contedo"),i(ba,"class","relative group"),i(Na,"id","modelos-atuais"),i(Na,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(Na,"href","#modelos-atuais"),i(La,"class","relative group"),i(cd,"href","model_doc/albert"),i(Fn,"href","https://arxiv.org/abs/1909.11942"),i(Fn,"rel","nofollow"),i(gd,"href","model_doc/bart"),i(Wn,"href","https://arxiv.org/abs/1910.13461"),i(Wn,"rel","nofollow"),i(fd,"href","model_doc/barthez"),i(Jn,"href","https://arxiv.org/abs/2010.12321"),i(Jn,"rel","nofollow"),i(ud,"href","model_doc/bartpho"),i(Yn,"href","https://arxiv.org/abs/2109.09701"),i(Yn,"rel","nofollow"),i(vd,"href","model_doc/beit"),i(Kn,"href","https://arxiv.org/abs/2106.08254"),i(Kn,"rel","nofollow"),i(Ed,"href","model_doc/bert"),i(Vn,"href","https://arxiv.org/abs/1810.04805"),i(Vn,"rel","nofollow"),i(Td,"href","model_doc/bertweet"),i(Zn,"href","https://aclanthology.org/2020.emnlp-demos.2/"),i(Zn,"rel","nofollow"),i(_d,"href","model_doc/bert-generation"),i(Xn,"href","https://arxiv.org/abs/1907.12461"),i(Xn,"rel","nofollow"),i(md,"href","model_doc/big_bird"),i(zn,"href","https://arxiv.org/abs/2007.14062"),i(zn,"rel","nofollow"),i(pd,"href","model_doc/bigbird_pegasus"),i(Hn,"href","https://arxiv.org/abs/2007.14062"),i(Hn,"rel","nofollow"),i(Dd,"href","model_doc/blenderbot"),i(Un,"href","https://arxiv.org/abs/2004.13637"),i(Un,"rel","nofollow"),i(Ad,"href","model_doc/blenderbot-small"),i(qn,"href","https://arxiv.org/abs/2004.13637"),i(qn,"rel","nofollow"),i(Rd,"href","model_doc/bort"),i(jn,"href","https://arxiv.org/abs/2010.10499"),i(jn,"rel","nofollow"),i(yd,"href","model_doc/byt5"),i(Qn,"href","https://arxiv.org/abs/2105.13626"),i(Qn,"rel","nofollow"),i(bd,"href","model_doc/camembert"),i($n,"href","https://arxiv.org/abs/1911.03894"),i($n,"rel","nofollow"),i(Ld,"href","model_doc/canine"),i(eo,"href","https://arxiv.org/
abs/2103.06874"),i(eo,"rel","nofollow"),i(Sd,"href","model_doc/convnext"),i(to,"href","https://arxiv.org/abs/2201.03545"),i(to,"rel","nofollow"),i(wd,"href","model_doc/clip"),i(ro,"href","https://arxiv.org/abs/2103.00020"),i(ro,"rel","nofollow"),i(Md,"href","model_doc/convbert"),i(ao,"href","https://arxiv.org/abs/2008.02496"),i(ao,"rel","nofollow"),i(Pd,"href","model_doc/cpm"),i(lo,"href","https://arxiv.org/abs/2012.00413"),i(lo,"rel","nofollow"),i(Gd,"href","model_doc/ctrl"),i(no,"href","https://arxiv.org/abs/1909.05858"),i(no,"rel","nofollow"),i(Nd,"href","model_doc/data2vec"),i(oo,"href","https://arxiv.org/abs/2202.03555"),i(oo,"rel","nofollow"),i(kd,"href","model_doc/deberta"),i(io,"href","https://arxiv.org/abs/2006.03654"),i(io,"rel","nofollow"),i(Cd,"href","model_doc/deberta-v2"),i(so,"href","https://arxiv.org/abs/2006.03654"),i(so,"rel","nofollow"),i(Id,"href","model_doc/decision_transformer"),i(ho,"href","https://arxiv.org/abs/2106.01345"),i(ho,"rel","nofollow"),i(Bd,"href","model_doc/dit"),i(co,"href","https://arxiv.org/abs/2203.02378"),i(co,"rel","nofollow"),i(xd,"href","model_doc/deit"),i(go,"href","https://arxiv.org/abs/2012.12877"),i(go,"rel","nofollow"),i(Od,"href","model_doc/detr"),i(fo,"href","https://arxiv.org/abs/2005.12872"),i(fo,"rel","nofollow"),i(Fd,"href","model_doc/dialogpt"),i(uo,"href","https://arxiv.org/abs/1911.00536"),i(uo,"rel","nofollow"),i(Wd,"href","model_doc/distilbert"),i(vo,"href","https://arxiv.org/abs/1910.01108"),i(vo,"rel","nofollow"),i(Eo,"href","https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation"),i(Eo,"rel","nofollow"),i(To,"href","https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation"),i(To,"rel","nofollow"),i(_o,"href","https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation"),i(_o,"rel","nofollow"),i(Jd,"href","model_doc/dpr"),i(mo,"href","https://arxiv.org/abs/2004.04906"),i(mo,"rel","nofollow"),i(Yd,
"href","master/model_doc/dpt"),i(po,"href","https://arxiv.org/abs/2103.13413"),i(po,"rel","nofollow"),i(Kd,"href","model_doc/encoder-decoder"),i(Do,"href","https://arxiv.org/abs/1907.12461"),i(Do,"rel","nofollow"),i(Vd,"href","model_doc/electra"),i(Ao,"href","https://arxiv.org/abs/2003.10555"),i(Ao,"rel","nofollow"),i(Zd,"href","model_doc/flaubert"),i(Ro,"href","https://arxiv.org/abs/1912.05372"),i(Ro,"rel","nofollow"),i(Xd,"href","model_doc/fnet"),i(yo,"href","https://arxiv.org/abs/2105.03824"),i(yo,"rel","nofollow"),i(zd,"href","model_doc/funnel"),i(bo,"href","https://arxiv.org/abs/2006.03236"),i(bo,"rel","nofollow"),i(Hd,"href","model_doc/glpn"),i(Lo,"href","https://arxiv.org/abs/2201.07436"),i(Lo,"rel","nofollow"),i(Ud,"href","model_doc/openai-gpt"),i(So,"href","https://blog.openai.com/language-unsupervised/"),i(So,"rel","nofollow"),i(qd,"href","model_doc/gpt2"),i(wo,"href","https://blog.openai.com/better-language-models/"),i(wo,"rel","nofollow"),i(jd,"href","model_doc/gptj"),i(Mo,"href","https://github.com/kingoflolz/mesh-transformer-jax/"),i(Mo,"rel","nofollow"),i(Qd,"href","model_doc/gpt_neo"),i(Po,"href","https://github.com/EleutherAI/gpt-neo"),i(Po,"rel","nofollow"),i($d,"href","model_doc/hubert"),i(Go,"href","https://arxiv.org/abs/2106.07447"),i(Go,"rel","nofollow"),i(es,"href","model_doc/ibert"),i(No,"href","https://arxiv.org/abs/2101.01321"),i(No,"rel","nofollow"),i(ts,"href","model_doc/imagegpt"),i(ko,"href","https://openai.com/blog/image-gpt/"),i(ko,"rel","nofollow"),i(rs,"href","model_doc/layoutlm"),i(Co,"href","https://arxiv.org/abs/1912.13318"),i(Co,"rel","nofollow"),i(as,"href","model_doc/layoutlmv2"),i(Io,"href","https://arxiv.org/abs/2012.14740"),i(Io,"rel","nofollow"),i(ls,"href","model_doc/layoutxlm"),i(Bo,"href","https://arxiv.org/abs/2104.08836"),i(Bo,"rel","nofollow"),i(ns,"href","model_doc/led"),i(xo,"href","https://arxiv.org/abs/2004.05150"),i(xo,"rel","nofollow"),i(os,"href","model_doc/longformer"),i(Oo,"href","https://arxiv.org/abs/2004.
05150"),i(Oo,"rel","nofollow"),i(is,"href","model_doc/luke"),i(Fo,"href","https://arxiv.org/abs/2010.01057"),i(Fo,"rel","nofollow"),i(ds,"href","model_doc/mluke"),i(Wo,"href","https://arxiv.org/abs/2110.08151"),i(Wo,"rel","nofollow"),i(ss,"href","model_doc/lxmert"),i(Jo,"href","https://arxiv.org/abs/1908.07490"),i(Jo,"rel","nofollow"),i(hs,"href","model_doc/m2m_100"),i(Yo,"href","https://arxiv.org/abs/2010.11125"),i(Yo,"rel","nofollow"),i(cs,"href","model_doc/marian"),i(Ko,"href","http://opus.nlpl.eu/"),i(Ko,"rel","nofollow"),i(Vo,"href","https://marian-nmt.github.io/"),i(Vo,"rel","nofollow"),i(gs,"href","model_doc/maskformer"),i(Zo,"href","https://arxiv.org/abs/2107.06278"),i(Zo,"rel","nofollow"),i(fs,"href","model_doc/mbart"),i(Xo,"href","https://arxiv.org/abs/2001.08210"),i(Xo,"rel","nofollow"),i(us,"href","model_doc/mbart"),i(zo,"href","https://arxiv.org/abs/2008.00401"),i(zo,"rel","nofollow"),i(vs,"href","model_doc/megatron-bert"),i(Ho,"href","https://arxiv.org/abs/1909.08053"),i(Ho,"rel","nofollow"),i(Es,"href","model_doc/megatron_gpt2"),i(Uo,"href","https://arxiv.org/abs/1909.08053"),i(Uo,"rel","nofollow"),i(Ts,"href","model_doc/mpnet"),i(qo,"href","https://arxiv.org/abs/2004.09297"),i(qo,"rel","nofollow"),i(_s,"href","model_doc/mt5"),i(jo,"href","https://arxiv.org/abs/2010.11934"),i(jo,"rel","nofollow"),i(ms,"href","model_doc/nystromformer"),i(Qo,"href","https://arxiv.org/abs/2102.03902"),i(Qo,"rel","nofollow"),i(ps,"href","model_doc/pegasus"),i($o,"href","https://arxiv.org/abs/1912.08777"),i($o,"rel","nofollow"),i(Ds,"href","model_doc/perceiver"),i(ei,"href","https://arxiv.org/abs/2107.14795"),i(ei,"rel","nofollow"),i(As,"href","model_doc/phobert"),i(ti,"href","https://www.aclweb.org/anthology/2020.findings-emnlp.92/"),i(ti,"rel","nofollow"),i(Rs,"href","model_doc/plbart"),i(ri,"href","https://arxiv.org/abs/2103.06333"),i(ri,"rel","nofollow"),i(ys,"href","model_doc/poolformer"),i(ai,"href","https://arxiv.org/abs/2111.11418"),i(ai,"rel","nofollow"),i(bs,"hre
f","model_doc/prophetnet"),i(li,"href","https://arxiv.org/abs/2001.04063"),i(li,"rel","nofollow"),i(Ls,"href","model_doc/qdqbert"),i(ni,"href","https://arxiv.org/abs/2004.09602"),i(ni,"rel","nofollow"),i(Ss,"href","model_doc/realm.html"),i(oi,"href","https://arxiv.org/abs/2002.08909"),i(oi,"rel","nofollow"),i(ws,"href","model_doc/reformer"),i(ii,"href","https://arxiv.org/abs/2001.04451"),i(ii,"rel","nofollow"),i(Ms,"href","model_doc/rembert"),i(di,"href","https://arxiv.org/abs/2010.12821"),i(di,"rel","nofollow"),i(Ps,"href","model_doc/regnet"),i(si,"href","https://arxiv.org/abs/2003.13678"),i(si,"rel","nofollow"),i(Gs,"href","model_doc/resnet"),i(hi,"href","https://arxiv.org/abs/1512.03385"),i(hi,"rel","nofollow"),i(Ns,"href","model_doc/roberta"),i(ci,"href","https://arxiv.org/abs/1907.11692"),i(ci,"rel","nofollow"),i(ks,"href","model_doc/roformer"),i(gi,"href","https://arxiv.org/abs/2104.09864"),i(gi,"rel","nofollow"),i(Cs,"href","model_doc/segformer"),i(fi,"href","https://arxiv.org/abs/2105.15203"),i(fi,"rel","nofollow"),i(Is,"href","model_doc/sew"),i(ui,"href","https://arxiv.org/abs/2109.06870"),i(ui,"rel","nofollow"),i(Bs,"href","model_doc/sew_d"),i(vi,"href","https://arxiv.org/abs/2109.06870"),i(vi,"rel","nofollow"),i(xs,"href","model_doc/speech_to_text"),i(Ei,"href","https://arxiv.org/abs/2010.05171"),i(Ei,"rel","nofollow"),i(Os,"href","model_doc/speech_to_text_2"),i(Ti,"href","https://arxiv.org/abs/2104.06678"),i(Ti,"rel","nofollow"),i(Fs,"href","model_doc/splinter"),i(_i,"href","https://arxiv.org/abs/2101.00438"),i(_i,"rel","nofollow"),i(Ws,"href","model_doc/squeezebert"),i(mi,"href","https://arxiv.org/abs/2006.11316"),i(mi,"rel","nofollow"),i(Js,"href","model_doc/swin"),i(pi,"href","https://arxiv.org/abs/2103.14030"),i(pi,"rel","nofollow"),i(Ys,"href","model_doc/t5"),i(Di,"href","https://arxiv.org/abs/1910.10683"),i(Di,"rel","nofollow"),i(Ks,"href","model_doc/t5v1.1"),i(Ai,"href","https://github.com/google-research/text-to-text-transfer-transformer/blob/mai
n/released_checkpoints.md#t511"),i(Ai,"rel","nofollow"),i(Vs,"href","model_doc/tapas"),i(Ri,"href","https://arxiv.org/abs/2004.02349"),i(Ri,"rel","nofollow"),i(Zs,"href","model_doc/tapex"),i(yi,"href","https://arxiv.org/abs/2107.07653"),i(yi,"rel","nofollow"),i(Xs,"href","model_doc/transfo-xl"),i(bi,"href","https://arxiv.org/abs/1901.02860"),i(bi,"rel","nofollow"),i(zs,"href","model_doc/trocr"),i(Li,"href","https://arxiv.org/abs/2109.10282"),i(Li,"rel","nofollow"),i(Hs,"href","model_doc/unispeech"),i(Si,"href","https://arxiv.org/abs/2101.07597"),i(Si,"rel","nofollow"),i(Us,"href","model_doc/unispeech-sat"),i(wi,"href","https://arxiv.org/abs/2110.05752"),i(wi,"rel","nofollow"),i(qs,"href","model_doc/van"),i(Mi,"href","https://arxiv.org/abs/2202.09741"),i(Mi,"rel","nofollow"),i(js,"href","model_doc/vilt"),i(Pi,"href","https://arxiv.org/abs/2102.03334"),i(Pi,"rel","nofollow"),i(Qs,"href","model_doc/vit"),i(Gi,"href","https://arxiv.org/abs/2010.11929"),i(Gi,"rel","nofollow"),i($s,"href","model_doc/vit_mae"),i(Ni,"href","https://arxiv.org/abs/2111.06377"),i(Ni,"rel","nofollow"),i(eh,"href","model_doc/visual_bert"),i(ki,"href","https://arxiv.org/pdf/1908.03557"),i(ki,"rel","nofollow"),i(th,"href","model_doc/wavlm"),i(Ci,"href","https://arxiv.org/abs/2110.13900"),i(Ci,"rel","nofollow"),i(rh,"href","model_doc/wav2vec2"),i(Ii,"href","https://arxiv.org/abs/2006.11477"),i(Ii,"rel","nofollow"),i(ah,"href","model_doc/wav2vec2_phoneme"),i(Bi,"href","https://arxiv.org/abs/2109.11680"),i(Bi,"rel","nofollow"),i(lh,"href","model_doc/xglm"),i(xi,"href","https://arxiv.org/abs/2112.10668"),i(xi,"rel","nofollow"),i(nh,"href","model_doc/xlm"),i(Oi,"href","https://arxiv.org/abs/1901.07291"),i(Oi,"rel","nofollow"),i(oh,"href","model_doc/xlm-prophetnet"),i(Fi,"href","https://arxiv.org/abs/2001.04063"),i(Fi,"rel","nofollow"),i(ih,"href","model_doc/xlm-roberta"),i(Wi,"href","https://arxiv.org/abs/1911.02116"),i(Wi,"rel","nofollow"),i(dh,"href","model_doc/xlm-roberta-xl"),i(Ji,"href","https://a
rxiv.org/abs/2105.00572"),i(Ji,"rel","nofollow"),i(sh,"href","model_doc/xlnet"),i(Yi,"href","https://arxiv.org/abs/1906.08237"),i(Yi,"rel","nofollow"),i(hh,"href","model_doc/xlsr_wav2vec2"),i(Ki,"href","https://arxiv.org/abs/2006.13979"),i(Ki,"rel","nofollow"),i(ch,"href","model_doc/xls_r"),i(Vi,"href","https://arxiv.org/abs/2111.09296"),i(Vi,"rel","nofollow"),i(gh,"href","model_doc/yoso"),i(Zi,"href","https://arxiv.org/abs/2111.09714"),i(Zi,"rel","nofollow"),i(wn,"id","frameworks-aceitos"),i(wn,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),i(wn,"href","#frameworks-aceitos"),i(Sa,"class","relative group"),i(uh,"align","center"),i(vh,"align","center"),i(Eh,"align","center"),i(Th,"align","center"),i(_h,"align","center"),i(mh,"align","center"),i(ph,"align","center"),i(Dh,"align","center"),i(Ah,"align","center"),i(Rh,"align","center"),i(yh,"align","center"),i(bh,"align","center"),i(Lh,"align","center"),i(Sh,"align","center"),i(wh,"align","center"),i(Mh,"align","center"),i(Ph,"align","center"),i(Gh,"align","center"),i(Nh,"align","center"),i(kh,"align","center"),i(Ch,"align","center"),i(Ih,"align","center"),i(Bh,"align","center"),i(xh,"align","center"),i(Oh,"align","center"),i(Fh,"align","center"),i(Wh,"align","center"),i(Jh,"align","center"),i(Yh,"align","center"),i(Kh,"align","center"),i(Vh,"align","center"),i(Zh,"align","center"),i(Xh,"align","center"),i(zh,"align","center"),i(Hh,"align","center"),i(Uh,"align","center"),i(qh,"align","center"),i(jh,"align","center"),i(Qh,"align","center"),i($h,"align","center"),i(ec,"align","center"),i(tc,"align","center"),i(rc,"align","center"),i(ac,"align","center"),i(lc,"align","center"),i(nc,"align","center"),i(oc,"align","center"),i(ic,"align","center"),i(dc,"align","center"),i(sc,"align","center"),i(hc,"align","center"),i(cc,"align","center"),i(gc,"align","center"),i(fc,"align","center"),i(uc,"align","ce
nter"),i(vc,"align","center"),i(Ec,"align","center"),i(Tc,"align","center"),i(_c,"align","center"),i(mc,"align","center"),i(pc,"align","center"),i(Dc,"align","center"),i(Ac,"align","center"),i(Rc,"align","center"),i(yc,"align","center"),i(bc,"align","center"),i(Lc,"align","center"),i(Sc,"align","center"),i(wc,"align","center"),i(Mc,"align","center"),i(Pc,"align","center"),i(Gc,"align","center"),i(Nc,"align","center"),i(kc,"align","center"),i(Cc,"align","center"),i(Ic,"align","center"),i(Bc,"align","center"),i(xc,"align","center"),i(Oc,"align","center"),i(Fc,"align","center"),i(Wc,"align","center"),i(Jc,"align","center"),i(Yc,"align","center"),i(Kc,"align","center"),i(Vc,"align","center"),i(Zc,"align","center"),i(Xc,"align","center"),i(zc,"align","center"),i(Hc,"align","center"),i(Uc,"align","center"),i(qc,"align","center"),i(jc,"align","center"),i(Qc,"align","center"),i($c,"align","center"),i(eg,"align","center"),i(tg,"align","center"),i(rg,"align","center"),i(ag,"align","center"),i(lg,"align","center"),i(ng,"align","center"),i(og,"align","center"),i(ig,"align","center"),i(dg,"align","center"),i(sg,"align","center"),i(hg,"align","center"),i(cg,"align","center"),i(gg,"align","center"),i(fg,"align","center"),i(ug,"align","center"),i(vg,"align","center"),i(Eg,"align","center"),i(Tg,"align","center"),i(_g,"align","center"),i(mg,"align","center"),i(pg,"align","center"),i(Dg,"align","center"),i(Ag,"align","center"),i(Rg,"align","center"),i(yg,"align","center"),i(bg,"align","center"),i(Lg,"align","center"),i(Sg,"align","center"),i(wg,"align","center"),i(Mg,"align","center"),i(Pg,"align","center"),i(Gg,"align","center"),i(Ng,"align","center"),i(kg,"align","center"),i(Cg,"align","center"),i(Ig,"align","center"),i(Bg,"align","center"),i(xg,"align","center"),i(Og,"align","center"),i(Fg,"align","center"),i(Wg,"align","center"),i(Jg,"align","center"),i(Yg,"align","center"),i(Kg,"align","center"),i(Vg,"align","center"),i(Zg,"align","center"),i(Xg,"align","center"),i(zg,"align","c
enter"),i(Hg,"align","center"),i(Ug,"align","center"),i(qg,"align","center"),i(jg,"align","center"),i(Qg,"align","center"),i($g,"align","center"),i(e1,"align","center"),i(t1,"align","center"),i(r1,"align","center"),i(a1,"align","center"),i(l1,"align","center"),i(n1,"align","center"),i(o1,"align","center"),i(i1,"align","center"),i(d1,"align","center"),i(s1,"align","center"),i(h1,"align","center"),i(c1,"align","center"),i(g1,"align","center"),i(f1,"align","center"),i(u1,"align","center"),i(v1,"align","center"),i(E1,"align","center"),i(T1,"align","center"),i(_1,"align","center"),i(m1,"align","center"),i(p1,"align","center"),i(D1,"align","center"),i(A1,"align","center"),i(R1,"align","center"),i(y1,"align","center"),i(b1,"align","center"),i(L1,"align","center"),i(S1,"align","center"),i(w1,"align","center"),i(M1,"align","center"),i(P1,"align","center"),i(G1,"align","center"),i(N1,"align","center"),i(k1,"align","center"),i(C1,"align","center"),i(I1,"align","center"),i(B1,"align","center"),i(x1,"align","center"),i(O1,"align","center"),i(F1,"align","center"),i(W1,"align","center"),i(J1,"align","center"),i(Y1,"align","center"),i(K1,"align","center"),i(V1,"align","center"),i(Z1,"align","center"),i(X1,"align","center"),i(z1,"align","center"),i(H1,"align","center"),i(U1,"align","center"),i(q1,"align","center"),i(j1,"align","center"),i(Q1,"align","center"),i($1,"align","center"),i(ef,"align","center"),i(tf,"align","center"),i(rf,"align","center"),i(af,"align","center"),i(lf,"align","center"),i(nf,"align","center"),i(of,"align","center"),i(df,"align","center"),i(sf,"align","center"),i(hf,"align","center"),i(cf,"align","center"),i(gf,"align","center"),i(ff,"align","center"),i(uf,"align","center"),i(vf,"align","center"),i(Ef,"align","center"),i(Tf,"align","center"),i(_f,"align","center"),i(mf,"align","center"),i(pf,"align","center"),i(Df,"align","center"),i(Af,"align","center"),i(Rf,"align","center"),i(yf,"align","center"),i(bf,"align","center"),i(Lf,"align","center"),i(Sf,"align","
center"),i(wf,"align","center"),i(Mf,"align","center"),i(Pf,"align","center"),i(Gf,"align","center"),i(Nf,"align","center"),i(kf,"align","center"),i(Cf,"align","center"),i(If,"align","center"),i(Bf,"align","center"),i(xf,"align","center"),i(Of,"align","center"),i(Ff,"align","center"),i(Wf,"align","center"),i(Jf,"align","center"),i(Yf,"align","center"),i(Kf,"align","center"),i(Vf,"align","center"),i(Zf,"align","center"),i(Xf,"align","center"),i(zf,"align","center"),i(Hf,"align","center"),i(Uf,"align","center"),i(qf,"align","center"),i(jf,"align","center"),i(Qf,"align","center"),i($f,"align","center"),i(eu,"align","center"),i(tu,"align","center"),i(ru,"align","center"),i(au,"align","center"),i(lu,"align","center"),i(nu,"align","center"),i(ou,"align","center"),i(iu,"align","center"),i(du,"align","center"),i(su,"align","center"),i(hu,"align","center"),i(cu,"align","center"),i(gu,"align","center"),i(fu,"align","center"),i(uu,"align","center"),i(vu,"align","center"),i(Eu,"align","center"),i(Tu,"align","center"),i(_u,"align","center"),i(mu,"align","center"),i(pu,"align","center"),i(Du,"align","center"),i(Au,"align","center"),i(Ru,"align","center"),i(yu,"align","center"),i(bu,"align","center"),i(Lu,"align","center"),i(Su,"align","center"),i(wu,"align","center"),i(Mu,"align","center"),i(Pu,"align","center"),i(Gu,"align","center"),i(Nu,"align","center"),i(ku,"align","center"),i(Cu,"align","center"),i(Iu,"align","center"),i(Bu,"align","center"),i(xu,"align","center"),i(Ou,"align","center"),i(Fu,"align","center"),i(Wu,"align","center"),i(Ju,"align","center"),i(Yu,"align","center"),i(Ku,"align","center"),i(Vu,"align","center"),i(Zu,"align","center"),i(Xu,"align","center"),i(zu,"align","center"),i(Hu,"align","center"),i(Uu,"align","center"),i(qu,"align","center"),i(ju,"align","center"),i(Qu,"align","center"),i($u,"align","center"),i(ev,"align","center"),i(tv,"align","center"),i(rv,"align","center"),i(av,"align","center"),i(lv,"align","center"),i(nv,"align","center"),i(ov,"align",
"center"),i(iv,"align","center"),i(dv,"align","center"),i(sv,"align","center"),i(hv,"align","center"),i(cv,"align","center"),i(gv,"align","center"),i(fv,"align","center"),i(uv,"align","center"),i(vv,"align","center"),i(Ev,"align","center"),i(Tv,"align","center"),i(_v,"align","center"),i(mv,"align","center"),i(pv,"align","center"),i(Dv,"align","center"),i(Av,"align","center"),i(Rv,"align","center"),i(yv,"align","center"),i(bv,"align","center"),i(Lv,"align","center"),i(Sv,"align","center"),i(wv,"align","center"),i(Mv,"align","center"),i(Pv,"align","center"),i(Gv,"align","center"),i(Nv,"align","center"),i(kv,"align","center"),i(Cv,"align","center"),i(Iv,"align","center"),i(Bv,"align","center"),i(xv,"align","center"),i(Ov,"align","center"),i(Fv,"align","center"),i(Wv,"align","center"),i(Jv,"align","center"),i(Yv,"align","center"),i(Kv,"align","center"),i(Vv,"align","center"),i(Zv,"align","center"),i(Xv,"align","center"),i(zv,"align","center"),i(Hv,"align","center"),i(Uv,"align","center"),i(qv,"align","center"),i(jv,"align","center"),i(Qv,"align","center"),i($v,"align","center"),i(eE,"align","center"),i(tE,"align","center"),i(rE,"align","center"),i(aE,"align","center"),i(lE,"align","center"),i(nE,"align","center"),i(oE,"align","center"),i(iE,"align","center"),i(dE,"align","center"),i(sE,"align","center"),i(hE,"align","center"),i(cE,"align","center"),i(gE,"align","center"),i(fE,"align","center"),i(uE,"align","center"),i(vE,"align","center"),i(EE,"align","center"),i(TE,"align","center"),i(_E,"align","center"),i(mE,"align","center"),i(pE,"align","center"),i(DE,"align","center"),i(AE,"align","center"),i(RE,"align","center"),i(yE,"align","center"),i(bE,"align","center"),i(LE,"align","center"),i(SE,"align","center"),i(wE,"align","center"),i(ME,"align","center"),i(PE,"align","center"),i(GE,"align","center"),i(NE,"align","center"),i(kE,"align","center"),i(CE,"align","center"),i(IE,"align","center"),i(BE,"align","center"),i(xE,"align","center"),i(OE,"align","center"),i(FE,"align"
,"center"),i(WE,"align","center"),i(JE,"align","center"),i(YE,"align","center"),i(KE,"align","center"),i(VE,"align","center"),i(ZE,"align","center"),i(XE,"align","center"),i(zE,"align","center"),i(HE,"align","center"),i(UE,"align","center"),i(qE,"align","center"),i(jE,"align","center"),i(QE,"align","center"),i($E,"align","center"),i(e2,"align","center"),i(t2,"align","center"),i(r2,"align","center"),i(a2,"align","center"),i(l2,"align","center"),i(n2,"align","center"),i(o2,"align","center"),i(i2,"align","center"),i(d2,"align","center"),i(s2,"align","center"),i(h2,"align","center"),i(c2,"align","center"),i(g2,"align","center"),i(f2,"align","center"),i(u2,"align","center"),i(v2,"align","center"),i(E2,"align","center"),i(T2,"align","center"),i(_2,"align","center"),i(m2,"align","center"),i(p2,"align","center"),i(D2,"align","center"),i(A2,"align","center"),i(R2,"align","center"),i(y2,"align","center"),i(b2,"align","center"),i(L2,"align","center"),i(S2,"align","center"),i(w2,"align","center"),i(M2,"align","center"),i(P2,"align","center"),i(G2,"align","center"),i(N2,"align","center"),i(k2,"align","center"),i(C2,"align","center"),i(I2,"align","center"),i(B2,"align","center"),i(x2,"align","center"),i(O2,"align","center"),i(F2,"align","center"),i(W2,"align","center"),i(J2,"align","center"),i(Y2,"align","center"),i(K2,"align","center"),i(V2,"align","center"),i(Z2,"align","center"),i(X2,"align","center"),i(z2,"align","center"),i(H2,"align","center"),i(U2,"align","center"),i(q2,"align","center"),i(j2,"align","center"),i(Q2,"align","center"),i($2,"align","center"),i(eT,"align","center"),i(tT,"align","center"),i(rT,"align","center"),i(aT,"align","center"),i(lT,"align","center"),i(nT,"align","center"),i(oT,"align","center"),i(iT,"align","center"),i(dT,"align","center"),i(sT,"align","center"),i(hT,"align","center"),i(cT,"align","center"),i(gT,"align","center"),i(fT,"align","center"),i(uT,"align","center"),i(vT,"align","center"),i(ET,"align","center"),i(TT,"align","center"),i(_T,"align
","center"),i(mT,"align","center"),i(pT,"align","center"),i(DT,"align","center"),i(AT,"align","center"),i(RT,"align","center"),i(yT,"align","center"),i(bT,"align","center"),i(LT,"align","center"),i(ST,"align","center"),i(wT,"align","center"),i(MT,"align","center"),i(PT,"align","center"),i(GT,"align","center"),i(NT,"align","center"),i(kT,"align","center"),i(CT,"align","center"),i(IT,"align","center"),i(BT,"align","center"),i(xT,"align","center"),i(OT,"align","center"),i(FT,"align","center"),i(WT,"align","center"),i(JT,"align","center"),i(YT,"align","center"),i(KT,"align","center"),i(VT,"align","center"),i(ZT,"align","center"),i(XT,"align","center"),i(zT,"align","center"),i(HT,"align","center"),i(UT,"align","center"),i(qT,"align","center"),i(jT,"align","center"),i(QT,"align","center"),i($T,"align","center"),i(e_,"align","center"),i(t_,"align","center"),i(r_,"align","center"),i(a_,"align","center"),i(l_,"align","center"),i(n_,"align","center"),i(o_,"align","center"),i(i_,"align","center"),i(d_,"align","center"),i(s_,"align","center"),i(h_,"align","center"),i(c_,"align","center"),i(g_,"align","center"),i(f_,"align","center"),i(u_,"align","center"),i(v_,"align","center"),i(E_,"align","center"),i(T_,"align","center"),i(__,"align","center"),i(m_,"align","center"),i(p_,"align","center"),i(D_,"align","center"),i(A_,"align","center"),i(R_,"align","center"),i(y_,"align","center"),i(b_,"align","center"),i(L_,"align","center"),i(S_,"align","center"),i(w_,"align","center"),i(M_,"align","center"),i(P_,"align","center"),i(G_,"align","center"),i(N_,"align","center"),i(k_,"align","center"),i(C_,"align","center"),i(I_,"align","center"),i(B_,"align","center"),i(x_,"align","center"),i(O_,"align","center"),i(F_,"align","center"),i(W_,"align","center"),i(J_,"align","center"),i(Y_,"align","center"),i(K_,"align","center"),i(V_,"align","center"),i(Z_,"align","center"),i(X_,"align","center"),i(z_,"align","center"),i(H_,"align","center"),i(U_,"align","center"),i(q_,"align","center"),i(j_,"alig
n","center"),i(Q_,"align","center"),i($_,"align","center"),i(e3,"align","center"),i(t3,"align","center"),i(r3,"align","center"),i(a3,"align","center"),i(l3,"align","center"),i(n3,"align","center"),i(o3,"align","center"),i(i3,"align","center"),i(d3,"align","center"),i(s3,"align","center"),i(h3,"align","center"),i(c3,"align","center"),i(g3,"align","center"),i(f3,"align","center"),i(u3,"align","center"),i(v3,"align","center"),i(E3,"align","center"),i(T3,"align","center"),i(_3,"align","center"),i(m3,"align","center"),i(p3,"align","center"),i(D3,"align","center"),i(A3,"align","center"),i(R3,"align","center"),i(y3,"align","center"),i(b3,"align","center"),i(L3,"align","center"),i(S3,"align","center"),i(w3,"align","center"),i(M3,"align","center"),i(P3,"align","center"),i(G3,"align","center"),i(N3,"align","center"),i(k3,"align","center"),i(C3,"align","center"),i(I3,"align","center"),i(B3,"align","center"),i(x3,"align","center"),i(O3,"align","center")},m(u,v){e(document.head,Da),E(u,Lp,v),E(u,Aa,v),e(Aa,Ma),e(Ma,F3),Dp(Nn,F3,null),e(Aa,jp),e(Aa,W3),e(W3,Qp),E(u,Sp,v),E(u,$i,v),e($i,$p),E(u,wp,v),E(u,vt,v),e(vt,J3),e(J3,e6),e(vt,t6),e(vt,Y3),e(Y3,r6),e(vt,a6),e(vt,K3),e(K3,l6),e(vt,n6),e(vt,V3),e(V3,o6),E(u,Mp,v),E(u,Et,v),e(Et,i6),e(Et,kn),e(kn,d6),e(Et,s6),e(Et,Cn),e(Cn,h6),e(Et,c6),e(Et,In),e(In,g6),e(Et,f6),E(u,Pp,v),E(u,ed,v),e(ed,u6),E(u,Gp,v),E(u,Ra,v),e(Ra,Pa),e(Pa,Z3),Dp(Bn,Z3,null),e(Ra,v6),e(Ra,X3),e(X3,E6),E(u,Np,v),E(u,ya,v),e(ya,ua),e(ya,T6),E(u,kp,v),E(u,Cp,v),E(u,ba,v),e(ba,Ga),e(Ga,z3),Dp(xn,z3,null),e(ba,_6),e(ba,H3),e(H3,m6),E(u,Ip,v),E(u,td,v),e(td,p6),E(u,Bp,v),E(u,T,v),e(T,U3),e(U3,rd),e(rd,q3),e(q3,D6),e(rd,A6),e(T,R6),e(T,j3),e(j3,ad),e(ad,Q3),e(Q3,y6),e(ad,b6),e(T,L6),e(T,$3),e($3,ld),e(ld,e5),e(e5,S6),e(ld,w6),e(T,M6),e(T,t5),e(t5,nd),e(nd,r5),e(r5,P6),e(nd,G6),e(T,N6),e(T,a5),e(a5,od),e(od,l5),e(l5,k6),e(od,C6),e(T,I6),e(T,n5),e(n5,id),e(id,o5),e(o5,B6),e(id,x6),e(T,O6),e(T,i5),e(i5,dd),e(dd,d5),e(d5,F6),e(dd,W6),e(T,J6),e(T,s5),e(s5,sd),e(sd,h5),e(h
5,Y6),e(sd,K6),E(u,xp,v),E(u,hd,v),e(hd,V6),E(u,Op,v),E(u,La,v),e(La,Na),e(Na,c5),Dp(On,c5,null),e(La,Z6),e(La,g5),e(g5,X6),E(u,Fp,v),E(u,h,v),e(h,ka),e(ka,f5),e(f5,cd),e(cd,z6),e(ka,H6),e(ka,Fn),e(Fn,U6),e(ka,q6),e(h,j6),e(h,Ca),e(Ca,u5),e(u5,gd),e(gd,Q6),e(Ca,$6),e(Ca,Wn),e(Wn,e7),e(Ca,t7),e(h,r7),e(h,Ia),e(Ia,v5),e(v5,fd),e(fd,a7),e(Ia,l7),e(Ia,Jn),e(Jn,n7),e(Ia,o7),e(h,i7),e(h,Ba),e(Ba,E5),e(E5,ud),e(ud,d7),e(Ba,s7),e(Ba,Yn),e(Yn,h7),e(Ba,c7),e(h,g7),e(h,xa),e(xa,T5),e(T5,vd),e(vd,f7),e(xa,u7),e(xa,Kn),e(Kn,v7),e(xa,E7),e(h,T7),e(h,Oa),e(Oa,_5),e(_5,Ed),e(Ed,_7),e(Oa,m7),e(Oa,Vn),e(Vn,p7),e(Oa,D7),e(h,A7),e(h,Fa),e(Fa,m5),e(m5,Td),e(Td,R7),e(Fa,y7),e(Fa,Zn),e(Zn,b7),e(Fa,L7),e(h,S7),e(h,Wa),e(Wa,p5),e(p5,_d),e(_d,w7),e(Wa,M7),e(Wa,Xn),e(Xn,P7),e(Wa,G7),e(h,N7),e(h,Ja),e(Ja,D5),e(D5,md),e(md,k7),e(Ja,C7),e(Ja,zn),e(zn,I7),e(Ja,B7),e(h,x7),e(h,Ya),e(Ya,A5),e(A5,pd),e(pd,O7),e(Ya,F7),e(Ya,Hn),e(Hn,W7),e(Ya,J7),e(h,Y7),e(h,Ka),e(Ka,R5),e(R5,Dd),e(Dd,K7),e(Ka,V7),e(Ka,Un),e(Un,Z7),e(Ka,X7),e(h,z7),e(h,Va),e(Va,y5),e(y5,Ad),e(Ad,H7),e(Va,U7),e(Va,qn),e(qn,q7),e(Va,j7),e(h,Q7),e(h,Za),e(Za,b5),e(b5,Rd),e(Rd,$7),e(Za,e8),e(Za,jn),e(jn,t8),e(Za,r8),e(h,a8),e(h,Xa),e(Xa,L5),e(L5,yd),e(yd,l8),e(Xa,n8),e(Xa,Qn),e(Qn,o8),e(Xa,i8),e(h,d8),e(h,va),e(va,S5),e(S5,bd),e(bd,s8),e(va,h8),e(va,$n),e($n,c8),e(va,g8),e(va,w5),e(w5,f8),e(va,u8),e(h,v8),e(h,za),e(za,M5),e(M5,Ld),e(Ld,E8),e(za,T8),e(za,eo),e(eo,_8),e(za,m8),e(h,p8),e(h,Ha),e(Ha,P5),e(P5,Sd),e(Sd,D8),e(Ha,A8),e(Ha,to),e(to,R8),e(Ha,y8),e(h,b8),e(h,Ua),e(Ua,G5),e(G5,wd),e(wd,L8),e(Ua,S8),e(Ua,ro),e(ro,w8),e(Ua,M8),e(h,P8),e(h,qa),e(qa,N5),e(N5,Md),e(Md,G8),e(qa,N8),e(qa,ao),e(ao,k8),e(qa,C8),e(h,I8),e(h,ja),e(ja,k5),e(k5,Pd),e(Pd,B8),e(ja,x8),e(ja,lo),e(lo,O8),e(ja,F8),e(h,W8),e(h,Ea),e(Ea,C5),e(C5,Gd),e(Gd,J8),e(Ea,Y8),e(Ea,no),e(no,K8),e(Ea,V8),e(Ea,I5),e(I5,Z8),e(Ea,X8),e(h,z8),e(h,Qa),e(Qa,B5),e(B5,Nd),e(Nd,H8),e(Qa,U8),e(Qa,oo),e(oo,q8),e(Qa,j8),e(h,Q8),e(h,$a),e($a,x5),e(x5,kd),e(kd,$8),e($a,e9),e($a,io),e(io,t9),e($
a,r9),e(h,a9),e(h,el),e(el,O5),e(O5,Cd),e(Cd,l9),e(el,n9),e(el,so),e(so,o9),e(el,i9),e(h,d9),e(h,tl),e(tl,F5),e(F5,Id),e(Id,s9),e(tl,h9),e(tl,ho),e(ho,c9),e(tl,g9),e(h,f9),e(h,rl),e(rl,W5),e(W5,Bd),e(Bd,u9),e(rl,v9),e(rl,co),e(co,E9),e(rl,T9),e(h,_9),e(h,al),e(al,J5),e(J5,xd),e(xd,m9),e(al,p9),e(al,go),e(go,D9),e(al,A9),e(h,R9),e(h,ll),e(ll,Y5),e(Y5,Od),e(Od,y9),e(ll,b9),e(ll,fo),e(fo,L9),e(ll,S9),e(h,w9),e(h,nl),e(nl,K5),e(K5,Fd),e(Fd,M9),e(nl,P9),e(nl,uo),e(uo,G9),e(nl,N9),e(h,k9),e(h,ut),e(ut,V5),e(V5,Wd),e(Wd,C9),e(ut,I9),e(ut,vo),e(vo,B9),e(ut,x9),e(ut,Eo),e(Eo,O9),e(ut,F9),e(ut,To),e(To,W9),e(ut,J9),e(ut,_o),e(_o,Y9),e(ut,K9),e(h,V9),e(h,ol),e(ol,Z5),e(Z5,Jd),e(Jd,Z9),e(ol,X9),e(ol,mo),e(mo,z9),e(ol,H9),e(h,U9),e(h,il),e(il,X5),e(X5,Yd),e(Yd,q9),e(il,j9),e(il,po),e(po,Q9),e(il,$9),e(h,eD),e(h,dl),e(dl,z5),e(z5,Kd),e(Kd,tD),e(dl,rD),e(dl,Do),e(Do,aD),e(dl,lD),e(h,nD),e(h,sl),e(sl,H5),e(H5,Vd),e(Vd,oD),e(sl,iD),e(sl,Ao),e(Ao,dD),e(sl,sD),e(h,hD),e(h,hl),e(hl,U5),e(U5,Zd),e(Zd,cD),e(hl,gD),e(hl,Ro),e(Ro,fD),e(hl,uD),e(h,vD),e(h,cl),e(cl,q5),e(q5,Xd),e(Xd,ED),e(cl,TD),e(cl,yo),e(yo,_D),e(cl,mD),e(h,pD),e(h,gl),e(gl,j5),e(j5,zd),e(zd,DD),e(gl,AD),e(gl,bo),e(bo,RD),e(gl,yD),e(h,bD),e(h,fl),e(fl,Q5),e(Q5,Hd),e(Hd,LD),e(fl,SD),e(fl,Lo),e(Lo,wD),e(fl,MD),e(h,PD),e(h,ul),e(ul,$5),e($5,Ud),e(Ud,GD),e(ul,ND),e(ul,So),e(So,kD),e(ul,CD),e(h,ID),e(h,Tt),e(Tt,e4),e(e4,qd),e(qd,BD),e(Tt,xD),e(Tt,wo),e(wo,OD),e(Tt,FD),e(Tt,t4),e(t4,WD),e(Tt,JD),e(Tt,r4),e(r4,YD),e(Tt,KD),e(h,VD),e(h,vl),e(vl,a4),e(a4,jd),e(jd,ZD),e(vl,XD),e(vl,Mo),e(Mo,zD),e(vl,HD),e(h,UD),e(h,El),e(El,l4),e(l4,Qd),e(Qd,qD),e(El,jD),e(El,Po),e(Po,QD),e(El,$D),e(h,eA),e(h,Tl),e(Tl,n4),e(n4,$d),e($d,tA),e(Tl,rA),e(Tl,Go),e(Go,aA),e(Tl,lA),e(h,nA),e(h,_l),e(_l,o4),e(o4,es),e(es,oA),e(_l,iA),e(_l,No),e(No,dA),e(_l,sA),e(h,hA),e(h,ml),e(ml,i4),e(i4,ts),e(ts,cA),e(ml,gA),e(ml,ko),e(ko,fA),e(ml,uA),e(h,vA),e(h,pl),e(pl,d4),e(d4,rs),e(rs,EA),e(pl,TA),e(pl,Co),e(Co,_A),e(pl,mA),e(h,pA),e(h,Dl),e(Dl,s4),e(s4,as),e(as,DA)
,e(Dl,AA),e(Dl,Io),e(Io,RA),e(Dl,yA),e(h,bA),e(h,Al),e(Al,h4),e(h4,ls),e(ls,LA),e(Al,SA),e(Al,Bo),e(Bo,wA),e(Al,MA),e(h,PA),e(h,Rl),e(Rl,c4),e(c4,ns),e(ns,GA),e(Rl,NA),e(Rl,xo),e(xo,kA),e(Rl,CA),e(h,IA),e(h,yl),e(yl,g4),e(g4,os),e(os,BA),e(yl,xA),e(yl,Oo),e(Oo,OA),e(yl,FA),e(h,WA),e(h,bl),e(bl,f4),e(f4,is),e(is,JA),e(bl,YA),e(bl,Fo),e(Fo,KA),e(bl,VA),e(h,ZA),e(h,Ll),e(Ll,u4),e(u4,ds),e(ds,XA),e(Ll,zA),e(Ll,Wo),e(Wo,HA),e(Ll,UA),e(h,qA),e(h,Sl),e(Sl,v4),e(v4,ss),e(ss,jA),e(Sl,QA),e(Sl,Jo),e(Jo,$A),e(Sl,eR),e(h,tR),e(h,wl),e(wl,E4),e(E4,hs),e(hs,rR),e(wl,aR),e(wl,Yo),e(Yo,lR),e(wl,nR),e(h,oR),e(h,Ta),e(Ta,T4),e(T4,cs),e(cs,iR),e(Ta,dR),e(Ta,Ko),e(Ko,sR),e(Ta,hR),e(Ta,Vo),e(Vo,cR),e(Ta,gR),e(h,fR),e(h,Ml),e(Ml,_4),e(_4,gs),e(gs,uR),e(Ml,vR),e(Ml,Zo),e(Zo,ER),e(Ml,TR),e(h,_R),e(h,Pl),e(Pl,m4),e(m4,fs),e(fs,mR),e(Pl,pR),e(Pl,Xo),e(Xo,DR),e(Pl,AR),e(h,RR),e(h,Gl),e(Gl,p4),e(p4,us),e(us,yR),e(Gl,bR),e(Gl,zo),e(zo,LR),e(Gl,SR),e(h,wR),e(h,Nl),e(Nl,D4),e(D4,vs),e(vs,MR),e(Nl,PR),e(Nl,Ho),e(Ho,GR),e(Nl,NR),e(h,kR),e(h,kl),e(kl,A4),e(A4,Es),e(Es,CR),e(kl,IR),e(kl,Uo),e(Uo,BR),e(kl,xR),e(h,OR),e(h,Cl),e(Cl,R4),e(R4,Ts),e(Ts,FR),e(Cl,WR),e(Cl,qo),e(qo,JR),e(Cl,YR),e(h,KR),e(h,Il),e(Il,y4),e(y4,_s),e(_s,VR),e(Il,ZR),e(Il,jo),e(jo,XR),e(Il,zR),e(h,HR),e(h,Bl),e(Bl,b4),e(b4,ms),e(ms,UR),e(Bl,qR),e(Bl,Qo),e(Qo,jR),e(Bl,QR),e(h,$R),e(h,xl),e(xl,L4),e(L4,ps),e(ps,ey),e(xl,ty),e(xl,$o),e($o,ry),e(xl,ay),e(h,ly),e(h,Ol),e(Ol,S4),e(S4,Ds),e(Ds,ny),e(Ol,oy),e(Ol,ei),e(ei,iy),e(Ol,dy),e(h,sy),e(h,Fl),e(Fl,w4),e(w4,As),e(As,hy),e(Fl,cy),e(Fl,ti),e(ti,gy),e(Fl,fy),e(h,uy),e(h,Wl),e(Wl,M4),e(M4,Rs),e(Rs,vy),e(Wl,Ey),e(Wl,ri),e(ri,Ty),e(Wl,_y),e(h,my),e(h,Jl),e(Jl,P4),e(P4,ys),e(ys,py),e(Jl,Dy),e(Jl,ai),e(ai,Ay),e(Jl,Ry),e(h,yy),e(h,Yl),e(Yl,G4),e(G4,bs),e(bs,by),e(Yl,Ly),e(Yl,li),e(li,Sy),e(Yl,wy),e(h,My),e(h,Kl),e(Kl,N4),e(N4,Ls),e(Ls,Py),e(Kl,Gy),e(Kl,ni),e(ni,Ny),e(Kl,ky),e(h,Cy),e(h,Vl),e(Vl,k4),e(k4,Ss),e(Ss,Iy),e(Vl,By),e(Vl,oi),e(oi,xy),e(Vl,Oy),e(h,Fy),e(h,Zl),e(Zl,C4),e(C4,ws),e(ws,W
y),e(Zl,Jy),e(Zl,ii),e(ii,Yy),e(Zl,Ky),e(h,Vy),e(h,Xl),e(Xl,I4),e(I4,Ms),e(Ms,Zy),e(Xl,Xy),e(Xl,di),e(di,zy),e(Xl,Hy),e(h,Uy),e(h,zl),e(zl,B4),e(B4,Ps),e(Ps,qy),e(zl,jy),e(zl,si),e(si,Qy),e(zl,$y),e(h,eb),e(h,Hl),e(Hl,x4),e(x4,Gs),e(Gs,tb),e(Hl,rb),e(Hl,hi),e(hi,ab),e(Hl,lb),e(h,nb),e(h,Ul),e(Ul,O4),e(O4,Ns),e(Ns,ob),e(Ul,ib),e(Ul,ci),e(ci,db),e(Ul,sb),e(h,hb),e(h,ql),e(ql,F4),e(F4,ks),e(ks,cb),e(ql,gb),e(ql,gi),e(gi,fb),e(ql,ub),e(h,vb),e(h,jl),e(jl,W4),e(W4,Cs),e(Cs,Eb),e(jl,Tb),e(jl,fi),e(fi,_b),e(jl,mb),e(h,pb),e(h,Ql),e(Ql,J4),e(J4,Is),e(Is,Db),e(Ql,Ab),e(Ql,ui),e(ui,Rb),e(Ql,yb),e(h,bb),e(h,$l),e($l,Y4),e(Y4,Bs),e(Bs,Lb),e($l,Sb),e($l,vi),e(vi,wb),e($l,Mb),e(h,Pb),e(h,en),e(en,K4),e(K4,xs),e(xs,Gb),e(en,Nb),e(en,Ei),e(Ei,kb),e(en,Cb),e(h,Ib),e(h,tn),e(tn,V4),e(V4,Os),e(Os,Bb),e(tn,xb),e(tn,Ti),e(Ti,Ob),e(tn,Fb),e(h,Wb),e(h,rn),e(rn,Z4),e(Z4,Fs),e(Fs,Jb),e(rn,Yb),e(rn,_i),e(_i,Kb),e(rn,Vb),e(h,Zb),e(h,an),e(an,X4),e(X4,Ws),e(Ws,Xb),e(an,zb),e(an,mi),e(mi,Hb),e(an,Ub),e(h,qb),e(h,ln),e(ln,z4),e(z4,Js),e(Js,jb),e(ln,Qb),e(ln,pi),e(pi,$b),e(ln,eL),e(h,tL),e(h,nn),e(nn,H4),e(H4,Ys),e(Ys,rL),e(nn,aL),e(nn,Di),e(Di,lL),e(nn,nL),e(h,oL),e(h,on),e(on,U4),e(U4,Ks),e(Ks,iL),e(on,dL),e(on,Ai),e(Ai,sL),e(on,hL),e(h,cL),e(h,dn),e(dn,q4),e(q4,Vs),e(Vs,gL),e(dn,fL),e(dn,Ri),e(Ri,uL),e(dn,vL),e(h,EL),e(h,sn),e(sn,j4),e(j4,Zs),e(Zs,TL),e(sn,_L),e(sn,yi),e(yi,mL),e(sn,pL),e(h,DL),e(h,_a),e(_a,Q4),e(Q4,Xs),e(Xs,AL),e(_a,RL),e(_a,bi),e(bi,yL),e(_a,bL),e(_a,$4),e($4,LL),e(_a,SL),e(h,wL),e(h,hn),e(hn,em),e(em,zs),e(zs,ML),e(hn,PL),e(hn,Li),e(Li,GL),e(hn,NL),e(h,kL),e(h,cn),e(cn,tm),e(tm,Hs),e(Hs,CL),e(cn,IL),e(cn,Si),e(Si,BL),e(cn,xL),e(h,OL),e(h,gn),e(gn,rm),e(rm,Us),e(Us,FL),e(gn,WL),e(gn,wi),e(wi,JL),e(gn,YL),e(h,KL),e(h,fn),e(fn,am),e(am,qs),e(qs,VL),e(fn,ZL),e(fn,Mi),e(Mi,XL),e(fn,zL),e(h,HL),e(h,un),e(un,lm),e(lm,js),e(js,UL),e(un,qL),e(un,Pi),e(Pi,jL),e(un,QL),e(h,$L),e(h,vn),e(vn,nm),e(nm,Qs),e(Qs,eS),e(vn,tS),e(vn,Gi),e(Gi,rS),e(vn,aS),e(h,lS),e(h,En),e(En,om),e(om,$s),e($s
,nS),e(En,oS),e(En,Ni),e(Ni,iS),e(En,dS),e(h,sS),e(h,Tn),e(Tn,im),e(im,eh),e(eh,hS),e(Tn,cS),e(Tn,ki),e(ki,gS),e(Tn,fS),e(h,uS),e(h,_n),e(_n,dm),e(dm,th),e(th,vS),e(_n,ES),e(_n,Ci),e(Ci,TS),e(_n,_S),e(h,mS),e(h,mn),e(mn,sm),e(sm,rh),e(rh,pS),e(mn,DS),e(mn,Ii),e(Ii,AS),e(mn,RS),e(h,yS),e(h,pn),e(pn,hm),e(hm,ah),e(ah,bS),e(pn,LS),e(pn,Bi),e(Bi,SS),e(pn,wS),e(h,MS),e(h,Dn),e(Dn,cm),e(cm,lh),e(lh,PS),e(Dn,GS),e(Dn,xi),e(xi,NS),e(Dn,kS),e(h,CS),e(h,An),e(An,gm),e(gm,nh),e(nh,IS),e(An,BS),e(An,Oi),e(Oi,xS),e(An,OS),e(h,FS),e(h,Rn),e(Rn,fm),e(fm,oh),e(oh,WS),e(Rn,JS),e(Rn,Fi),e(Fi,YS),e(Rn,KS),e(h,VS),e(h,ma),e(ma,um),e(um,ih),e(ih,ZS),e(ma,XS),e(ma,Wi),e(Wi,zS),e(ma,HS),e(ma,vm),e(vm,US),e(ma,qS),e(h,jS),e(h,yn),e(yn,Em),e(Em,dh),e(dh,QS),e(yn,$S),e(yn,Ji),e(Ji,ew),e(yn,tw),e(h,rw),e(h,pa),e(pa,Tm),e(Tm,sh),e(sh,aw),e(pa,lw),e(pa,Yi),e(Yi,nw),e(pa,ow),e(pa,_m),e(_m,iw),e(pa,dw),e(h,sw),e(h,bn),e(bn,mm),e(mm,hh),e(hh,hw),e(bn,cw),e(bn,Ki),e(Ki,gw),e(bn,fw),e(h,uw),e(h,Ln),e(Ln,pm),e(pm,ch),e(ch,vw),e(Ln,Ew),e(Ln,Vi),e(Vi,Tw),e(Ln,_w),e(h,mw),e(h,Sn),e(Sn,Dm),e(Dm,gh),e(gh,pw),e(Sn,Dw),e(Sn,Zi),e(Zi,Aw),e(Sn,Rw),E(u,Wp,v),E(u,Sa,v),e(Sa,wn),e(wn,Am),Dp(Xi,Am,null),e(Sa,yw),e(Sa,Rm),e(Rm,bw),E(u,Jp,v),E(u,fh,v),e(fh,Lw),E(u,Yp,v),E(u,Mn,v),e(Mn,ym),e(ym,m),e(m,uh),e(uh,Sw),e(m,ww),e(m,vh),e(vh,Mw),e(m,Pw),e(m,Eh),e(Eh,Gw),e(m,Nw),e(m,Th),e(Th,kw),e(m,Cw),e(m,_h),e(_h,Iw),e(m,Bw),e(m,mh),e(mh,xw),e(Mn,Ow),e(Mn,g),e(g,p),e(p,ph),e(ph,Fw),e(p,Ww),e(p,Dh),e(Dh,Jw),e(p,Yw),e(p,Ah),e(Ah,Kw),e(p,Vw),e(p,Rh),e(Rh,Zw),e(p,Xw),e(p,yh),e(yh,zw),e(p,Hw),e(p,bh),e(bh,Uw),e(g,qw),e(g,D),e(D,Lh),e(Lh,jw),e(D,Qw),e(D,Sh),e(Sh,$w),e(D,eM),e(D,wh),e(wh,tM),e(D,rM),e(D,Mh),e(Mh,aM),e(D,lM),e(D,Ph),e(Ph,nM),e(D,oM),e(D,Gh),e(Gh,iM),e(g,dM),e(g,A),e(A,Nh),e(Nh,sM),e(A,hM),e(A,kh),e(kh,cM),e(A,gM),e(A,Ch),e(Ch,fM),e(A,uM),e(A,Ih),e(Ih,vM),e(A,EM),e(A,Bh),e(Bh,TM),e(A,_M),e(A,xh),e(xh,mM),e(g,pM),e(g,R),e(R,Oh),e(Oh,DM),e(R,AM),e(R,Fh),e(Fh,RM),e(R,yM),e(R,Wh),e(Wh,bM),e(R,LM),e(R,Jh),e(Jh,SM),e(R
,wM),e(R,Yh),e(Yh,MM),e(R,PM),e(R,Kh),e(Kh,GM),e(g,NM),e(g,y),e(y,Vh),e(Vh,kM),e(y,CM),e(y,Zh),e(Zh,IM),e(y,BM),e(y,Xh),e(Xh,xM),e(y,OM),e(y,zh),e(zh,FM),e(y,WM),e(y,Hh),e(Hh,JM),e(y,YM),e(y,Uh),e(Uh,KM),e(g,VM),e(g,b),e(b,qh),e(qh,ZM),e(b,XM),e(b,jh),e(jh,zM),e(b,HM),e(b,Qh),e(Qh,UM),e(b,qM),e(b,$h),e($h,jM),e(b,QM),e(b,ec),e(ec,$M),e(b,eP),e(b,tc),e(tc,tP),e(g,rP),e(g,L),e(L,rc),e(rc,aP),e(L,lP),e(L,ac),e(ac,nP),e(L,oP),e(L,lc),e(lc,iP),e(L,dP),e(L,nc),e(nc,sP),e(L,hP),e(L,oc),e(oc,cP),e(L,gP),e(L,ic),e(ic,fP),e(g,uP),e(g,S),e(S,dc),e(dc,vP),e(S,EP),e(S,sc),e(sc,TP),e(S,_P),e(S,hc),e(hc,mP),e(S,pP),e(S,cc),e(cc,DP),e(S,AP),e(S,gc),e(gc,RP),e(S,yP),e(S,fc),e(fc,bP),e(g,LP),e(g,w),e(w,uc),e(uc,SP),e(w,wP),e(w,vc),e(vc,MP),e(w,PP),e(w,Ec),e(Ec,GP),e(w,NP),e(w,Tc),e(Tc,kP),e(w,CP),e(w,_c),e(_c,IP),e(w,BP),e(w,mc),e(mc,xP),e(g,OP),e(g,M),e(M,pc),e(pc,FP),e(M,WP),e(M,Dc),e(Dc,JP),e(M,YP),e(M,Ac),e(Ac,KP),e(M,VP),e(M,Rc),e(Rc,ZP),e(M,XP),e(M,yc),e(yc,zP),e(M,HP),e(M,bc),e(bc,UP),e(g,qP),e(g,P),e(P,Lc),e(Lc,jP),e(P,QP),e(P,Sc),e(Sc,$P),e(P,eG),e(P,wc),e(wc,tG),e(P,rG),e(P,Mc),e(Mc,aG),e(P,lG),e(P,Pc),e(Pc,nG),e(P,oG),e(P,Gc),e(Gc,iG),e(g,dG),e(g,G),e(G,Nc),e(Nc,sG),e(G,hG),e(G,kc),e(kc,cG),e(G,gG),e(G,Cc),e(Cc,fG),e(G,uG),e(G,Ic),e(Ic,vG),e(G,EG),e(G,Bc),e(Bc,TG),e(G,_G),e(G,xc),e(xc,mG),e(g,pG),e(g,N),e(N,Oc),e(Oc,DG),e(N,AG),e(N,Fc),e(Fc,RG),e(N,yG),e(N,Wc),e(Wc,bG),e(N,LG),e(N,Jc),e(Jc,SG),e(N,wG),e(N,Yc),e(Yc,MG),e(N,PG),e(N,Kc),e(Kc,GG),e(g,NG),e(g,k),e(k,Vc),e(Vc,kG),e(k,CG),e(k,Zc),e(Zc,IG),e(k,BG),e(k,Xc),e(Xc,xG),e(k,OG),e(k,zc),e(zc,FG),e(k,WG),e(k,Hc),e(Hc,JG),e(k,YG),e(k,Uc),e(Uc,KG),e(g,VG),e(g,C),e(C,qc),e(qc,ZG),e(C,XG),e(C,jc),e(jc,zG),e(C,HG),e(C,Qc),e(Qc,UG),e(C,qG),e(C,$c),e($c,jG),e(C,QG),e(C,eg),e(eg,$G),e(C,eN),e(C,tg),e(tg,tN),e(g,rN),e(g,I),e(I,rg),e(rg,aN),e(I,lN),e(I,ag),e(ag,nN),e(I,oN),e(I,lg),e(lg,iN),e(I,dN),e(I,ng),e(ng,sN),e(I,hN),e(I,og),e(og,cN),e(I,gN),e(I,ig),e(ig,fN),e(g,uN),e(g,B),e(B,dg),e(dg,vN),e(B,EN),e(B,sg),e(sg,TN),e(B,_N),e(B,
hg),e(hg,mN),e(B,pN),e(B,cg),e(cg,DN),e(B,AN),e(B,gg),e(gg,RN),e(B,yN),e(B,fg),e(fg,bN),e(g,LN),e(g,x),e(x,ug),e(ug,SN),e(x,wN),e(x,vg),e(vg,MN),e(x,PN),e(x,Eg),e(Eg,GN),e(x,NN),e(x,Tg),e(Tg,kN),e(x,CN),e(x,_g),e(_g,IN),e(x,BN),e(x,mg),e(mg,xN),e(g,ON),e(g,O),e(O,pg),e(pg,FN),e(O,WN),e(O,Dg),e(Dg,JN),e(O,YN),e(O,Ag),e(Ag,KN),e(O,VN),e(O,Rg),e(Rg,ZN),e(O,XN),e(O,yg),e(yg,zN),e(O,HN),e(O,bg),e(bg,UN),e(g,qN),e(g,F),e(F,Lg),e(Lg,jN),e(F,QN),e(F,Sg),e(Sg,$N),e(F,ek),e(F,wg),e(wg,tk),e(F,rk),e(F,Mg),e(Mg,ak),e(F,lk),e(F,Pg),e(Pg,nk),e(F,ok),e(F,Gg),e(Gg,ik),e(g,dk),e(g,W),e(W,Ng),e(Ng,sk),e(W,hk),e(W,kg),e(kg,ck),e(W,gk),e(W,Cg),e(Cg,fk),e(W,uk),e(W,Ig),e(Ig,vk),e(W,Ek),e(W,Bg),e(Bg,Tk),e(W,_k),e(W,xg),e(xg,mk),e(g,pk),e(g,J),e(J,Og),e(Og,Dk),e(J,Ak),e(J,Fg),e(Fg,Rk),e(J,yk),e(J,Wg),e(Wg,bk),e(J,Lk),e(J,Jg),e(Jg,Sk),e(J,wk),e(J,Yg),e(Yg,Mk),e(J,Pk),e(J,Kg),e(Kg,Gk),e(g,Nk),e(g,Y),e(Y,Vg),e(Vg,kk),e(Y,Ck),e(Y,Zg),e(Zg,Ik),e(Y,Bk),e(Y,Xg),e(Xg,xk),e(Y,Ok),e(Y,zg),e(zg,Fk),e(Y,Wk),e(Y,Hg),e(Hg,Jk),e(Y,Yk),e(Y,Ug),e(Ug,Kk),e(g,Vk),e(g,K),e(K,qg),e(qg,Zk),e(K,Xk),e(K,jg),e(jg,zk),e(K,Hk),e(K,Qg),e(Qg,Uk),e(K,qk),e(K,$g),e($g,jk),e(K,Qk),e(K,e1),e(e1,$k),e(K,eC),e(K,t1),e(t1,tC),e(g,rC),e(g,V),e(V,r1),e(r1,aC),e(V,lC),e(V,a1),e(a1,nC),e(V,oC),e(V,l1),e(l1,iC),e(V,dC),e(V,n1),e(n1,sC),e(V,hC),e(V,o1),e(o1,cC),e(V,gC),e(V,i1),e(i1,fC),e(g,uC),e(g,Z),e(Z,d1),e(d1,vC),e(Z,EC),e(Z,s1),e(s1,TC),e(Z,_C),e(Z,h1),e(h1,mC),e(Z,pC),e(Z,c1),e(c1,DC),e(Z,AC),e(Z,g1),e(g1,RC),e(Z,yC),e(Z,f1),e(f1,bC),e(g,LC),e(g,X),e(X,u1),e(u1,SC),e(X,wC),e(X,v1),e(v1,MC),e(X,PC),e(X,E1),e(E1,GC),e(X,NC),e(X,T1),e(T1,kC),e(X,CC),e(X,_1),e(_1,IC),e(X,BC),e(X,m1),e(m1,xC),e(g,OC),e(g,z),e(z,p1),e(p1,FC),e(z,WC),e(z,D1),e(D1,JC),e(z,YC),e(z,A1),e(A1,KC),e(z,VC),e(z,R1),e(R1,ZC),e(z,XC),e(z,y1),e(y1,zC),e(z,HC),e(z,b1),e(b1,UC),e(g,qC),e(g,H),e(H,L1),e(L1,jC),e(H,QC),e(H,S1),e(S1,$C),e(H,eI),e(H,w1),e(w1,tI),e(H,rI),e(H,M1),e(M1,aI),e(H,lI),e(H,P1),e(P1,nI),e(H,oI),e(H,G1),e(G1,iI),e(g,dI),e(g,U),e(U,N1),e(N1,
sI),e(U,hI),e(U,k1),e(k1,cI),e(U,gI),e(U,C1),e(C1,fI),e(U,uI),e(U,I1),e(I1,vI),e(U,EI),e(U,B1),e(B1,TI),e(U,_I),e(U,x1),e(x1,mI),e(g,pI),e(g,q),e(q,O1),e(O1,DI),e(q,AI),e(q,F1),e(F1,RI),e(q,yI),e(q,W1),e(W1,bI),e(q,LI),e(q,J1),e(J1,SI),e(q,wI),e(q,Y1),e(Y1,MI),e(q,PI),e(q,K1),e(K1,GI),e(g,NI),e(g,j),e(j,V1),e(V1,kI),e(j,CI),e(j,Z1),e(Z1,II),e(j,BI),e(j,X1),e(X1,xI),e(j,OI),e(j,z1),e(z1,FI),e(j,WI),e(j,H1),e(H1,JI),e(j,YI),e(j,U1),e(U1,KI),e(g,VI),e(g,Q),e(Q,q1),e(q1,ZI),e(Q,XI),e(Q,j1),e(j1,zI),e(Q,HI),e(Q,Q1),e(Q1,UI),e(Q,qI),e(Q,$1),e($1,jI),e(Q,QI),e(Q,ef),e(ef,$I),e(Q,eB),e(Q,tf),e(tf,tB),e(g,rB),e(g,$),e($,rf),e(rf,aB),e($,lB),e($,af),e(af,nB),e($,oB),e($,lf),e(lf,iB),e($,dB),e($,nf),e(nf,sB),e($,hB),e($,of),e(of,cB),e($,gB),e($,df),e(df,fB),e(g,uB),e(g,ee),e(ee,sf),e(sf,vB),e(ee,EB),e(ee,hf),e(hf,TB),e(ee,_B),e(ee,cf),e(cf,mB),e(ee,pB),e(ee,gf),e(gf,DB),e(ee,AB),e(ee,ff),e(ff,RB),e(ee,yB),e(ee,uf),e(uf,bB),e(g,LB),e(g,te),e(te,vf),e(vf,SB),e(te,wB),e(te,Ef),e(Ef,MB),e(te,PB),e(te,Tf),e(Tf,GB),e(te,NB),e(te,_f),e(_f,kB),e(te,CB),e(te,mf),e(mf,IB),e(te,BB),e(te,pf),e(pf,xB),e(g,OB),e(g,re),e(re,Df),e(Df,FB),e(re,WB),e(re,Af),e(Af,JB),e(re,YB),e(re,Rf),e(Rf,KB),e(re,VB),e(re,yf),e(yf,ZB),e(re,XB),e(re,bf),e(bf,zB),e(re,HB),e(re,Lf),e(Lf,UB),e(g,qB),e(g,ae),e(ae,Sf),e(Sf,jB),e(ae,QB),e(ae,wf),e(wf,$B),e(ae,ex),e(ae,Mf),e(Mf,tx),e(ae,rx),e(ae,Pf),e(Pf,ax),e(ae,lx),e(ae,Gf),e(Gf,nx),e(ae,ox),e(ae,Nf),e(Nf,ix),e(g,dx),e(g,le),e(le,kf),e(kf,sx),e(le,hx),e(le,Cf),e(Cf,cx),e(le,gx),e(le,If),e(If,fx),e(le,ux),e(le,Bf),e(Bf,vx),e(le,Ex),e(le,xf),e(xf,Tx),e(le,_x),e(le,Of),e(Of,mx),e(g,px),e(g,ne),e(ne,Ff),e(Ff,Dx),e(ne,Ax),e(ne,Wf),e(Wf,Rx),e(ne,yx),e(ne,Jf),e(Jf,bx),e(ne,Lx),e(ne,Yf),e(Yf,Sx),e(ne,wx),e(ne,Kf),e(Kf,Mx),e(ne,Px),e(ne,Vf),e(Vf,Gx),e(g,Nx),e(g,oe),e(oe,Zf),e(Zf,kx),e(oe,Cx),e(oe,Xf),e(Xf,Ix),e(oe,Bx),e(oe,zf),e(zf,xx),e(oe,Ox),e(oe,Hf),e(Hf,Fx),e(oe,Wx),e(oe,Uf),e(Uf,Jx),e(oe,Yx),e(oe,qf),e(qf,Kx),e(g,Vx),e(g,ie),e(ie,jf),e(jf,Zx),e(ie,Xx),e(ie,Qf),e(Qf,zx)
,e(ie,Hx),e(ie,$f),e($f,Ux),e(ie,qx),e(ie,eu),e(eu,jx),e(ie,Qx),e(ie,tu),e(tu,$x),e(ie,eO),e(ie,ru),e(ru,tO),e(g,rO),e(g,de),e(de,au),e(au,aO),e(de,lO),e(de,lu),e(lu,nO),e(de,oO),e(de,nu),e(nu,iO),e(de,dO),e(de,ou),e(ou,sO),e(de,hO),e(de,iu),e(iu,cO),e(de,gO),e(de,du),e(du,fO),e(g,uO),e(g,se),e(se,su),e(su,vO),e(se,EO),e(se,hu),e(hu,TO),e(se,_O),e(se,cu),e(cu,mO),e(se,pO),e(se,gu),e(gu,DO),e(se,AO),e(se,fu),e(fu,RO),e(se,yO),e(se,uu),e(uu,bO),e(g,LO),e(g,he),e(he,vu),e(vu,SO),e(he,wO),e(he,Eu),e(Eu,MO),e(he,PO),e(he,Tu),e(Tu,GO),e(he,NO),e(he,_u),e(_u,kO),e(he,CO),e(he,mu),e(mu,IO),e(he,BO),e(he,pu),e(pu,xO),e(g,OO),e(g,ce),e(ce,Du),e(Du,FO),e(ce,WO),e(ce,Au),e(Au,JO),e(ce,YO),e(ce,Ru),e(Ru,KO),e(ce,VO),e(ce,yu),e(yu,ZO),e(ce,XO),e(ce,bu),e(bu,zO),e(ce,HO),e(ce,Lu),e(Lu,UO),e(g,qO),e(g,ge),e(ge,Su),e(Su,jO),e(ge,QO),e(ge,wu),e(wu,$O),e(ge,eF),e(ge,Mu),e(Mu,tF),e(ge,rF),e(ge,Pu),e(Pu,aF),e(ge,lF),e(ge,Gu),e(Gu,nF),e(ge,oF),e(ge,Nu),e(Nu,iF),e(g,dF),e(g,fe),e(fe,ku),e(ku,sF),e(fe,hF),e(fe,Cu),e(Cu,cF),e(fe,gF),e(fe,Iu),e(Iu,fF),e(fe,uF),e(fe,Bu),e(Bu,vF),e(fe,EF),e(fe,xu),e(xu,TF),e(fe,_F),e(fe,Ou),e(Ou,mF),e(g,pF),e(g,ue),e(ue,Fu),e(Fu,DF),e(ue,AF),e(ue,Wu),e(Wu,RF),e(ue,yF),e(ue,Ju),e(Ju,bF),e(ue,LF),e(ue,Yu),e(Yu,SF),e(ue,wF),e(ue,Ku),e(Ku,MF),e(ue,PF),e(ue,Vu),e(Vu,GF),e(g,NF),e(g,ve),e(ve,Zu),e(Zu,kF),e(ve,CF),e(ve,Xu),e(Xu,IF),e(ve,BF),e(ve,zu),e(zu,xF),e(ve,OF),e(ve,Hu),e(Hu,FF),e(ve,WF),e(ve,Uu),e(Uu,JF),e(ve,YF),e(ve,qu),e(qu,KF),e(g,VF),e(g,Ee),e(Ee,ju),e(ju,ZF),e(Ee,XF),e(Ee,Qu),e(Qu,zF),e(Ee,HF),e(Ee,$u),e($u,UF),e(Ee,qF),e(Ee,ev),e(ev,jF),e(Ee,QF),e(Ee,tv),e(tv,$F),e(Ee,eW),e(Ee,rv),e(rv,tW),e(g,rW),e(g,Te),e(Te,av),e(av,aW),e(Te,lW),e(Te,lv),e(lv,nW),e(Te,oW),e(Te,nv),e(nv,iW),e(Te,dW),e(Te,ov),e(ov,sW),e(Te,hW),e(Te,iv),e(iv,cW),e(Te,gW),e(Te,dv),e(dv,fW),e(g,uW),e(g,_e),e(_e,sv),e(sv,vW),e(_e,EW),e(_e,hv),e(hv,TW),e(_e,_W),e(_e,cv),e(cv,mW),e(_e,pW),e(_e,gv),e(gv,DW),e(_e,AW),e(_e,fv),e(fv,RW),e(_e,yW),e(_e,uv),e(uv,bW),e(g,LW),e(g,me),e(me,vv),e(vv,SW
),e(me,wW),e(me,Ev),e(Ev,MW),e(me,PW),e(me,Tv),e(Tv,GW),e(me,NW),e(me,_v),e(_v,kW),e(me,CW),e(me,mv),e(mv,IW),e(me,BW),e(me,pv),e(pv,xW),e(g,OW),e(g,pe),e(pe,Dv),e(Dv,FW),e(pe,WW),e(pe,Av),e(Av,JW),e(pe,YW),e(pe,Rv),e(Rv,KW),e(pe,VW),e(pe,yv),e(yv,ZW),e(pe,XW),e(pe,bv),e(bv,zW),e(pe,HW),e(pe,Lv),e(Lv,UW),e(g,qW),e(g,De),e(De,Sv),e(Sv,jW),e(De,QW),e(De,wv),e(wv,$W),e(De,eJ),e(De,Mv),e(Mv,tJ),e(De,rJ),e(De,Pv),e(Pv,aJ),e(De,lJ),e(De,Gv),e(Gv,nJ),e(De,oJ),e(De,Nv),e(Nv,iJ),e(g,dJ),e(g,Ae),e(Ae,kv),e(kv,sJ),e(Ae,hJ),e(Ae,Cv),e(Cv,cJ),e(Ae,gJ),e(Ae,Iv),e(Iv,fJ),e(Ae,uJ),e(Ae,Bv),e(Bv,vJ),e(Ae,EJ),e(Ae,xv),e(xv,TJ),e(Ae,_J),e(Ae,Ov),e(Ov,mJ),e(g,pJ),e(g,Re),e(Re,Fv),e(Fv,DJ),e(Re,AJ),e(Re,Wv),e(Wv,RJ),e(Re,yJ),e(Re,Jv),e(Jv,bJ),e(Re,LJ),e(Re,Yv),e(Yv,SJ),e(Re,wJ),e(Re,Kv),e(Kv,MJ),e(Re,PJ),e(Re,Vv),e(Vv,GJ),e(g,NJ),e(g,ye),e(ye,Zv),e(Zv,kJ),e(ye,CJ),e(ye,Xv),e(Xv,IJ),e(ye,BJ),e(ye,zv),e(zv,xJ),e(ye,OJ),e(ye,Hv),e(Hv,FJ),e(ye,WJ),e(ye,Uv),e(Uv,JJ),e(ye,YJ),e(ye,qv),e(qv,KJ),e(g,VJ),e(g,be),e(be,jv),e(jv,ZJ),e(be,XJ),e(be,Qv),e(Qv,zJ),e(be,HJ),e(be,$v),e($v,UJ),e(be,qJ),e(be,eE),e(eE,jJ),e(be,QJ),e(be,tE),e(tE,$J),e(be,eY),e(be,rE),e(rE,tY),e(g,rY),e(g,Le),e(Le,aE),e(aE,aY),e(Le,lY),e(Le,lE),e(lE,nY),e(Le,oY),e(Le,nE),e(nE,iY),e(Le,dY),e(Le,oE),e(oE,sY),e(Le,hY),e(Le,iE),e(iE,cY),e(Le,gY),e(Le,dE),e(dE,fY),e(g,uY),e(g,Se),e(Se,sE),e(sE,vY),e(Se,EY),e(Se,hE),e(hE,TY),e(Se,_Y),e(Se,cE),e(cE,mY),e(Se,pY),e(Se,gE),e(gE,DY),e(Se,AY),e(Se,fE),e(fE,RY),e(Se,yY),e(Se,uE),e(uE,bY),e(g,LY),e(g,we),e(we,vE),e(vE,SY),e(we,wY),e(we,EE),e(EE,MY),e(we,PY),e(we,TE),e(TE,GY),e(we,NY),e(we,_E),e(_E,kY),e(we,CY),e(we,mE),e(mE,IY),e(we,BY),e(we,pE),e(pE,xY),e(g,OY),e(g,Me),e(Me,DE),e(DE,FY),e(Me,WY),e(Me,AE),e(AE,JY),e(Me,YY),e(Me,RE),e(RE,KY),e(Me,VY),e(Me,yE),e(yE,ZY),e(Me,XY),e(Me,bE),e(bE,zY),e(Me,HY),e(Me,LE),e(LE,UY),e(g,qY),e(g,Pe),e(Pe,SE),e(SE,jY),e(Pe,QY),e(Pe,wE),e(wE,$Y),e(Pe,eK),e(Pe,ME),e(ME,tK),e(Pe,rK),e(Pe,PE),e(PE,aK),e(Pe,lK),e(Pe,GE),e(GE,nK),e(Pe,oK),e(Pe,NE),e(NE,iK),e(g,
dK),e(g,Ge),e(Ge,kE),e(kE,sK),e(Ge,hK),e(Ge,CE),e(CE,cK),e(Ge,gK),e(Ge,IE),e(IE,fK),e(Ge,uK),e(Ge,BE),e(BE,vK),e(Ge,EK),e(Ge,xE),e(xE,TK),e(Ge,_K),e(Ge,OE),e(OE,mK),e(g,pK),e(g,Ne),e(Ne,FE),e(FE,DK),e(Ne,AK),e(Ne,WE),e(WE,RK),e(Ne,yK),e(Ne,JE),e(JE,bK),e(Ne,LK),e(Ne,YE),e(YE,SK),e(Ne,wK),e(Ne,KE),e(KE,MK),e(Ne,PK),e(Ne,VE),e(VE,GK),e(g,NK),e(g,ke),e(ke,ZE),e(ZE,kK),e(ke,CK),e(ke,XE),e(XE,IK),e(ke,BK),e(ke,zE),e(zE,xK),e(ke,OK),e(ke,HE),e(HE,FK),e(ke,WK),e(ke,UE),e(UE,JK),e(ke,YK),e(ke,qE),e(qE,KK),e(g,VK),e(g,Ce),e(Ce,jE),e(jE,ZK),e(Ce,XK),e(Ce,QE),e(QE,zK),e(Ce,HK),e(Ce,$E),e($E,UK),e(Ce,qK),e(Ce,e2),e(e2,jK),e(Ce,QK),e(Ce,t2),e(t2,$K),e(Ce,eV),e(Ce,r2),e(r2,tV),e(g,rV),e(g,Ie),e(Ie,a2),e(a2,aV),e(Ie,lV),e(Ie,l2),e(l2,nV),e(Ie,oV),e(Ie,n2),e(n2,iV),e(Ie,dV),e(Ie,o2),e(o2,sV),e(Ie,hV),e(Ie,i2),e(i2,cV),e(Ie,gV),e(Ie,d2),e(d2,fV),e(g,uV),e(g,Be),e(Be,s2),e(s2,vV),e(Be,EV),e(Be,h2),e(h2,TV),e(Be,_V),e(Be,c2),e(c2,mV),e(Be,pV),e(Be,g2),e(g2,DV),e(Be,AV),e(Be,f2),e(f2,RV),e(Be,yV),e(Be,u2),e(u2,bV),e(g,LV),e(g,xe),e(xe,v2),e(v2,SV),e(xe,wV),e(xe,E2),e(E2,MV),e(xe,PV),e(xe,T2),e(T2,GV),e(xe,NV),e(xe,_2),e(_2,kV),e(xe,CV),e(xe,m2),e(m2,IV),e(xe,BV),e(xe,p2),e(p2,xV),e(g,OV),e(g,Oe),e(Oe,D2),e(D2,FV),e(Oe,WV),e(Oe,A2),e(A2,JV),e(Oe,YV),e(Oe,R2),e(R2,KV),e(Oe,VV),e(Oe,y2),e(y2,ZV),e(Oe,XV),e(Oe,b2),e(b2,zV),e(Oe,HV),e(Oe,L2),e(L2,UV),e(g,qV),e(g,Fe),e(Fe,S2),e(S2,jV),e(Fe,QV),e(Fe,w2),e(w2,$V),e(Fe,eZ),e(Fe,M2),e(M2,tZ),e(Fe,rZ),e(Fe,P2),e(P2,aZ),e(Fe,lZ),e(Fe,G2),e(G2,nZ),e(Fe,oZ),e(Fe,N2),e(N2,iZ),e(g,dZ),e(g,We),e(We,k2),e(k2,sZ),e(We,hZ),e(We,C2),e(C2,cZ),e(We,gZ),e(We,I2),e(I2,fZ),e(We,uZ),e(We,B2),e(B2,vZ),e(We,EZ),e(We,x2),e(x2,TZ),e(We,_Z),e(We,O2),e(O2,mZ),e(g,pZ),e(g,Je),e(Je,F2),e(F2,DZ),e(Je,AZ),e(Je,W2),e(W2,RZ),e(Je,yZ),e(Je,J2),e(J2,bZ),e(Je,LZ),e(Je,Y2),e(Y2,SZ),e(Je,wZ),e(Je,K2),e(K2,MZ),e(Je,PZ),e(Je,V2),e(V2,GZ),e(g,NZ),e(g,Ye),e(Ye,Z2),e(Z2,kZ),e(Ye,CZ),e(Ye,X2),e(X2,IZ),e(Ye,BZ),e(Ye,z2),e(z2,xZ),e(Ye,OZ),e(Ye,H2),e(H2,FZ),e(Ye,WZ),e(Ye,U2),e(U2,JZ),e(Y
e,YZ),e(Ye,q2),e(q2,KZ),e(g,VZ),e(g,Ke),e(Ke,j2),e(j2,ZZ),e(Ke,XZ),e(Ke,Q2),e(Q2,zZ),e(Ke,HZ),e(Ke,$2),e($2,UZ),e(Ke,qZ),e(Ke,eT),e(eT,jZ),e(Ke,QZ),e(Ke,tT),e(tT,$Z),e(Ke,eX),e(Ke,rT),e(rT,tX),e(g,rX),e(g,Ve),e(Ve,aT),e(aT,aX),e(Ve,lX),e(Ve,lT),e(lT,nX),e(Ve,oX),e(Ve,nT),e(nT,iX),e(Ve,dX),e(Ve,oT),e(oT,sX),e(Ve,hX),e(Ve,iT),e(iT,cX),e(Ve,gX),e(Ve,dT),e(dT,fX),e(g,uX),e(g,Ze),e(Ze,sT),e(sT,vX),e(Ze,EX),e(Ze,hT),e(hT,TX),e(Ze,_X),e(Ze,cT),e(cT,mX),e(Ze,pX),e(Ze,gT),e(gT,DX),e(Ze,AX),e(Ze,fT),e(fT,RX),e(Ze,yX),e(Ze,uT),e(uT,bX),e(g,LX),e(g,Xe),e(Xe,vT),e(vT,SX),e(Xe,wX),e(Xe,ET),e(ET,MX),e(Xe,PX),e(Xe,TT),e(TT,GX),e(Xe,NX),e(Xe,_T),e(_T,kX),e(Xe,CX),e(Xe,mT),e(mT,IX),e(Xe,BX),e(Xe,pT),e(pT,xX),e(g,OX),e(g,ze),e(ze,DT),e(DT,FX),e(ze,WX),e(ze,AT),e(AT,JX),e(ze,YX),e(ze,RT),e(RT,KX),e(ze,VX),e(ze,yT),e(yT,ZX),e(ze,XX),e(ze,bT),e(bT,zX),e(ze,HX),e(ze,LT),e(LT,UX),e(g,qX),e(g,He),e(He,ST),e(ST,jX),e(He,QX),e(He,wT),e(wT,$X),e(He,ez),e(He,MT),e(MT,tz),e(He,rz),e(He,PT),e(PT,az),e(He,lz),e(He,GT),e(GT,nz),e(He,oz),e(He,NT),e(NT,iz),e(g,dz),e(g,Ue),e(Ue,kT),e(kT,sz),e(Ue,hz),e(Ue,CT),e(CT,cz),e(Ue,gz),e(Ue,IT),e(IT,fz),e(Ue,uz),e(Ue,BT),e(BT,vz),e(Ue,Ez),e(Ue,xT),e(xT,Tz),e(Ue,_z),e(Ue,OT),e(OT,mz),e(g,pz),e(g,qe),e(qe,FT),e(FT,Dz),e(qe,Az),e(qe,WT),e(WT,Rz),e(qe,yz),e(qe,JT),e(JT,bz),e(qe,Lz),e(qe,YT),e(YT,Sz),e(qe,wz),e(qe,KT),e(KT,Mz),e(qe,Pz),e(qe,VT),e(VT,Gz),e(g,Nz),e(g,je),e(je,ZT),e(ZT,kz),e(je,Cz),e(je,XT),e(XT,Iz),e(je,Bz),e(je,zT),e(zT,xz),e(je,Oz),e(je,HT),e(HT,Fz),e(je,Wz),e(je,UT),e(UT,Jz),e(je,Yz),e(je,qT),e(qT,Kz),e(g,Vz),e(g,Qe),e(Qe,jT),e(jT,Zz),e(Qe,Xz),e(Qe,QT),e(QT,zz),e(Qe,Hz),e(Qe,$T),e($T,Uz),e(Qe,qz),e(Qe,e_),e(e_,jz),e(Qe,Qz),e(Qe,t_),e(t_,$z),e(Qe,eH),e(Qe,r_),e(r_,tH),e(g,rH),e(g,$e),e($e,a_),e(a_,aH),e($e,lH),e($e,l_),e(l_,nH),e($e,oH),e($e,n_),e(n_,iH),e($e,dH),e($e,o_),e(o_,sH),e($e,hH),e($e,i_),e(i_,cH),e($e,gH),e($e,d_),e(d_,fH),e(g,uH),e(g,et),e(et,s_),e(s_,vH),e(et,EH),e(et,h_),e(h_,TH),e(et,_H),e(et,c_),e(c_,mH),e(et,pH),e(et,g_),e(g_,DH),e(
et,AH),e(et,f_),e(f_,RH),e(et,yH),e(et,u_),e(u_,bH),e(g,LH),e(g,tt),e(tt,v_),e(v_,SH),e(tt,wH),e(tt,E_),e(E_,MH),e(tt,PH),e(tt,T_),e(T_,GH),e(tt,NH),e(tt,__),e(__,kH),e(tt,CH),e(tt,m_),e(m_,IH),e(tt,BH),e(tt,p_),e(p_,xH),e(g,OH),e(g,rt),e(rt,D_),e(D_,FH),e(rt,WH),e(rt,A_),e(A_,JH),e(rt,YH),e(rt,R_),e(R_,KH),e(rt,VH),e(rt,y_),e(y_,ZH),e(rt,XH),e(rt,b_),e(b_,zH),e(rt,HH),e(rt,L_),e(L_,UH),e(g,qH),e(g,at),e(at,S_),e(S_,jH),e(at,QH),e(at,w_),e(w_,$H),e(at,eU),e(at,M_),e(M_,tU),e(at,rU),e(at,P_),e(P_,aU),e(at,lU),e(at,G_),e(G_,nU),e(at,oU),e(at,N_),e(N_,iU),e(g,dU),e(g,lt),e(lt,k_),e(k_,sU),e(lt,hU),e(lt,C_),e(C_,cU),e(lt,gU),e(lt,I_),e(I_,fU),e(lt,uU),e(lt,B_),e(B_,vU),e(lt,EU),e(lt,x_),e(x_,TU),e(lt,_U),e(lt,O_),e(O_,mU),e(g,pU),e(g,nt),e(nt,F_),e(F_,DU),e(nt,AU),e(nt,W_),e(W_,RU),e(nt,yU),e(nt,J_),e(J_,bU),e(nt,LU),e(nt,Y_),e(Y_,SU),e(nt,wU),e(nt,K_),e(K_,MU),e(nt,PU),e(nt,V_),e(V_,GU),e(g,NU),e(g,ot),e(ot,Z_),e(Z_,kU),e(ot,CU),e(ot,X_),e(X_,IU),e(ot,BU),e(ot,z_),e(z_,xU),e(ot,OU),e(ot,H_),e(H_,FU),e(ot,WU),e(ot,U_),e(U_,JU),e(ot,YU),e(ot,q_),e(q_,KU),e(g,VU),e(g,it),e(it,j_),e(j_,ZU),e(it,XU),e(it,Q_),e(Q_,zU),e(it,HU),e(it,$_),e($_,UU),e(it,qU),e(it,e3),e(e3,jU),e(it,QU),e(it,t3),e(t3,$U),e(it,eq),e(it,r3),e(r3,tq),e(g,rq),e(g,dt),e(dt,a3),e(a3,aq),e(dt,lq),e(dt,l3),e(l3,nq),e(dt,oq),e(dt,n3),e(n3,iq),e(dt,dq),e(dt,o3),e(o3,sq),e(dt,hq),e(dt,i3),e(i3,cq),e(dt,gq),e(dt,d3),e(d3,fq),e(g,uq),e(g,st),e(st,s3),e(s3,vq),e(st,Eq),e(st,h3),e(h3,Tq),e(st,_q),e(st,c3),e(c3,mq),e(st,pq),e(st,g3),e(g3,Dq),e(st,Aq),e(st,f3),e(f3,Rq),e(st,yq),e(st,u3),e(u3,bq),e(g,Lq),e(g,ht),e(ht,v3),e(v3,Sq),e(ht,wq),e(ht,E3),e(E3,Mq),e(ht,Pq),e(ht,T3),e(T3,Gq),e(ht,Nq),e(ht,_3),e(_3,kq),e(ht,Cq),e(ht,m3),e(m3,Iq),e(ht,Bq),e(ht,p3),e(p3,xq),e(g,Oq),e(g,ct),e(ct,D3),e(D3,Fq),e(ct,Wq),e(ct,A3),e(A3,Jq),e(ct,Yq),e(ct,R3),e(R3,Kq),e(ct,Vq),e(ct,y3),e(y3,Zq),e(ct,Xq),e(ct,b3),e(b3,zq),e(ct,Hq),e(ct,L3),e(L3,Uq),e(g,qq),e(g,gt),e(gt,S3),e(S3,jq),e(gt,Qq),e(gt,w3),e(w3,$q),e(gt,ej),e(gt,M3),e(M3,tj),e
(gt,rj),e(gt,P3),e(P3,aj),e(gt,lj),e(gt,G3),e(G3,nj),e(gt,oj),e(gt,N3),e(N3,ij),e(g,dj),e(g,ft),e(ft,k3),e(k3,sj),e(ft,hj),e(ft,C3),e(C3,cj),e(ft,gj),e(ft,I3),e(I3,fj),e(ft,uj),e(ft,B3),e(B3,vj),e(ft,Ej),e(ft,x3),e(x3,Tj),e(ft,_j),e(ft,O3),e(O3,mj),Kp=!0},p:fve,i(u){Kp||(Ap(Nn.$$.fragment,u),Ap(Bn.$$.fragment,u),Ap(xn.$$.fragment,u),Ap(On.$$.fragment,u),Ap(Xi.$$.fragment,u),Kp=!0)},o(u){Rp(Nn.$$.fragment,u),Rp(Bn.$$.fragment,u),Rp(xn.$$.fragment,u),Rp(On.$$.fragment,u),Rp(Xi.$$.fragment,u),Kp=!1},d(u){t(Da),u&&t(Lp),u&&t(Aa),yp(Nn),u&&t(Sp),u&&t($i),u&&t(wp),u&&t(vt),u&&t(Mp),u&&t(Et),u&&t(Pp),u&&t(ed),u&&t(Gp),u&&t(Ra),yp(Bn),u&&t(Np),u&&t(ya),u&&t(kp),u&&t(Cp),u&&t(ba),yp(xn),u&&t(Ip),u&&t(td),u&&t(Bp),u&&t(T),u&&t(xp),u&&t(hd),u&&t(Op),u&&t(La),yp(On),u&&t(Fp),u&&t(h),u&&t(Wp),u&&t(Sa),yp(Xi),u&&t(Jp),u&&t(fh),u&&t(Yp),u&&t(Mn)}}}const Eve={local:"transformers",sections:[{local:"se-voc-estiver-procurando-suporte-do-time-da-hugging-face-acesse",title:"Se voc\xEA estiver procurando suporte do time da Hugging Face, acesse"},{local:"contedo",sections:[{local:"modelos-atuais",title:"Modelos atuais"},{local:"frameworks-aceitos",title:"Frameworks aceitos"}],title:"Conte\xFAdo"}],title:"\u{1F917} Transformers"};function Tve(Mj){return uve(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class pve extends dve{constructor(Da){super();sve(this,Da,Tve,vve,hve,{})}}export{pve as default,Eve as metadata};
481
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/fast_tokenizers.mdx-hf-doc-builder.js
import{S as bo,i as Po,s as wo,e as r,k as m,w as C,t as i,M as qo,c as t,d as o,m as d,a as n,x as O,h as l,b as f,G as a,g as p,y as F,L as yo,q as x,o as B,B as J,v as Ao}from"../chunks/vendor-hf-doc-builder.js";import{I as ao}from"../chunks/IconCopyLink-hf-doc-builder.js";import{C as be}from"../chunks/CodeBlock-hf-doc-builder.js";function So(so){let h,re,z,_,I,b,Pe,G,we,te,c,qe,R,ye,Ae,P,Se,Ne,ne,U,Ce,ie,w,le,D,Oe,pe,v,$,Q,q,Fe,X,xe,me,u,Be,Y,Je,Ue,Z,De,Ke,de,y,fe,j,Me,K,Ve,Le,ce,g,T,ee,A,We,oe,He,ue,M,Ie,ke,S,he,k,Ge,ae,Re,Qe,se,Xe,Ye,ze,N,ve,E,Ze,V,eo,oo,ge;return b=new ao({}),w=new be({props:{code:`from tokenizers import Tokenizer from tokenizers.models import BPE from tokenizers.trainers import BpeTrainer from tokenizers.pre_tokenizers import Whitespace tokenizer = Tokenizer(BPE(unk_token="[UNK]")) trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) tokenizer.pre_tokenizer = Whitespace() files = [...] tokenizer.train(files, trainer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers <span class="hljs-keyword">import</span> Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.models <span class="hljs-keyword">import</span> BPE <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.trainers <span class="hljs-keyword">import</span> BpeTrainer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tokenizers.pre_tokenizers <span class="hljs-keyword">import</span> Whitespace <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = Tokenizer(BPE(unk_token=<span class="hljs-string">&quot;[UNK]&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = BpeTrainer(special_tokens=[<span class="hljs-string">&quot;[UNK]&quot;</span>, <span class="hljs-string">&quot;[CLS]&quot;</span>, <span class="hljs-string">&quot;[SEP]&quot;</span>, <span 
class="hljs-string">&quot;[PAD]&quot;</span>, <span class="hljs-string">&quot;[MASK]&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.pre_tokenizer = Whitespace() <span class="hljs-meta">&gt;&gt;&gt; </span>files = [...] <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.train(files, trainer)`}}),q=new ao({}),y=new be({props:{code:`from transformers import PreTrainedTokenizerFast fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PreTrainedTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)`}}),A=new ao({}),S=new be({props:{code:'tokenizer.save("tokenizer.json")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save(<span class="hljs-string">&quot;tokenizer.json&quot;</span>)'}}),N=new be({props:{code:`from transformers import PreTrainedTokenizerFast fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> PreTrainedTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file=<span class="hljs-string">&quot;tokenizer.json&quot;</span>)`}}),{c(){h=r("meta"),re=m(),z=r("h1"),_=r("a"),I=r("span"),C(b.$$.fragment),Pe=m(),G=r("span"),we=i("Usando os Tokenizers do \u{1F917} Tokenizers"),te=m(),c=r("p"),qe=i("O "),R=r("code"),ye=i("PreTrainedTokenizerFast"),Ae=i(" depende da biblioteca "),P=r("a"),Se=i("\u{1F917} Tokenizers"),Ne=i(". 
O Tokenizer obtido da biblioteca \u{1F917} Tokenizers pode ser carregado facilmente pelo \u{1F917} Transformers."),ne=m(),U=r("p"),Ce=i("Antes de entrar nos detalhes, vamos come\xE7ar criando um tokenizer fict\xEDcio em algumas linhas:"),ie=m(),C(w.$$.fragment),le=m(),D=r("p"),Oe=i("Agora temos um tokenizer treinado nos arquivos que foram definidos. N\xF3s podemos continuar usando nessa execu\xE7\xE3o ou salvar em um arquivo JSON para re-utilizar no futuro."),pe=m(),v=r("h2"),$=r("a"),Q=r("span"),C(q.$$.fragment),Fe=m(),X=r("span"),xe=i("Carregando diretamente de um objeto tokenizer"),me=m(),u=r("p"),Be=i("Vamos ver como aproveitar esse objeto tokenizer na biblioteca \u{1F917} Transformers. A classe "),Y=r("code"),Je=i("PreTrainedTokenizerFast"),Ue=i(" permite uma instancia\xE7\xE3o f\xE1cil, aceitando o objeto "),Z=r("em"),De=i("tokenizer"),Ke=i(" instanciado como um argumento:"),de=m(),C(y.$$.fragment),fe=m(),j=r("p"),Me=i("Esse objeto pode ser utilizado com todos os m\xE9todos compartilhados pelos tokenizers dos \u{1F917} Transformers! V\xE1 para "),K=r("a"),Ve=i("a p\xE1gina do tokenizer"),Le=i(" para mais informa\xE7\xF5es."),ce=m(),g=r("h2"),T=r("a"),ee=r("span"),C(A.$$.fragment),We=m(),oe=r("span"),He=i("Carregando de um arquivo JSON"),ue=m(),M=r("p"),Ie=i("Para carregar um tokenizer de um arquivo JSON vamos primeiro come\xE7ar salvando nosso tokenizer:"),ke=m(),C(S.$$.fragment),he=m(),k=r("p"),Ge=i("A pasta para qual salvamos esse arquivo pode ser passada para o m\xE9todo de inicializa\xE7\xE3o do "),ae=r("code"),Re=i("PreTrainedTokenizerFast"),Qe=i(" usando o "),se=r("code"),Xe=i("tokenizer_file"),Ye=i(" par\xE2metro:"),ze=m(),C(N.$$.fragment),ve=m(),E=r("p"),Ze=i("Esse objeto pode ser utilizado com todos os m\xE9todos compartilhados pelos tokenizers dos \u{1F917} Transformers! 
V\xE1 para "),V=r("a"),eo=i("a p\xE1gina do tokenizer"),oo=i(" para mais informa\xE7\xF5es."),this.h()},l(e){const s=qo('[data-svelte="svelte-1phssyn"]',document.head);h=t(s,"META",{name:!0,content:!0}),s.forEach(o),re=d(e),z=t(e,"H1",{class:!0});var _e=n(z);_=t(_e,"A",{id:!0,class:!0,href:!0});var ro=n(_);I=t(ro,"SPAN",{});var to=n(I);O(b.$$.fragment,to),to.forEach(o),ro.forEach(o),Pe=d(_e),G=t(_e,"SPAN",{});var no=n(G);we=l(no,"Usando os Tokenizers do \u{1F917} Tokenizers"),no.forEach(o),_e.forEach(o),te=d(e),c=t(e,"P",{});var L=n(c);qe=l(L,"O "),R=t(L,"CODE",{});var io=n(R);ye=l(io,"PreTrainedTokenizerFast"),io.forEach(o),Ae=l(L," depende da biblioteca "),P=t(L,"A",{href:!0,rel:!0});var lo=n(P);Se=l(lo,"\u{1F917} Tokenizers"),lo.forEach(o),Ne=l(L,". O Tokenizer obtido da biblioteca \u{1F917} Tokenizers pode ser carregado facilmente pelo \u{1F917} Transformers."),L.forEach(o),ne=d(e),U=t(e,"P",{});var po=n(U);Ce=l(po,"Antes de entrar nos detalhes, vamos come\xE7ar criando um tokenizer fict\xEDcio em algumas linhas:"),po.forEach(o),ie=d(e),O(w.$$.fragment,e),le=d(e),D=t(e,"P",{});var mo=n(D);Oe=l(mo,"Agora temos um tokenizer treinado nos arquivos que foram definidos. N\xF3s podemos continuar usando nessa execu\xE7\xE3o ou salvar em um arquivo JSON para re-utilizar no futuro."),mo.forEach(o),pe=d(e),v=t(e,"H2",{class:!0});var $e=n(v);$=t($e,"A",{id:!0,class:!0,href:!0});var fo=n($);Q=t(fo,"SPAN",{});var co=n(Q);O(q.$$.fragment,co),co.forEach(o),fo.forEach(o),Fe=d($e),X=t($e,"SPAN",{});var uo=n(X);xe=l(uo,"Carregando diretamente de um objeto tokenizer"),uo.forEach(o),$e.forEach(o),me=d(e),u=t(e,"P",{});var W=n(u);Be=l(W,"Vamos ver como aproveitar esse objeto tokenizer na biblioteca \u{1F917} Transformers. 
A classe "),Y=t(W,"CODE",{});var ko=n(Y);Je=l(ko,"PreTrainedTokenizerFast"),ko.forEach(o),Ue=l(W," permite uma instancia\xE7\xE3o f\xE1cil, aceitando o objeto "),Z=t(W,"EM",{});var ho=n(Z);De=l(ho,"tokenizer"),ho.forEach(o),Ke=l(W," instanciado como um argumento:"),W.forEach(o),de=d(e),O(y.$$.fragment,e),fe=d(e),j=t(e,"P",{});var je=n(j);Me=l(je,"Esse objeto pode ser utilizado com todos os m\xE9todos compartilhados pelos tokenizers dos \u{1F917} Transformers! V\xE1 para "),K=t(je,"A",{href:!0});var zo=n(K);Ve=l(zo,"a p\xE1gina do tokenizer"),zo.forEach(o),Le=l(je," para mais informa\xE7\xF5es."),je.forEach(o),ce=d(e),g=t(e,"H2",{class:!0});var Te=n(g);T=t(Te,"A",{id:!0,class:!0,href:!0});var vo=n(T);ee=t(vo,"SPAN",{});var go=n(ee);O(A.$$.fragment,go),go.forEach(o),vo.forEach(o),We=d(Te),oe=t(Te,"SPAN",{});var _o=n(oe);He=l(_o,"Carregando de um arquivo JSON"),_o.forEach(o),Te.forEach(o),ue=d(e),M=t(e,"P",{});var $o=n(M);Ie=l($o,"Para carregar um tokenizer de um arquivo JSON vamos primeiro come\xE7ar salvando nosso tokenizer:"),$o.forEach(o),ke=d(e),O(S.$$.fragment,e),he=d(e),k=t(e,"P",{});var H=n(k);Ge=l(H,"A pasta para qual salvamos esse arquivo pode ser passada para o m\xE9todo de inicializa\xE7\xE3o do "),ae=t(H,"CODE",{});var jo=n(ae);Re=l(jo,"PreTrainedTokenizerFast"),jo.forEach(o),Qe=l(H," usando o "),se=t(H,"CODE",{});var To=n(se);Xe=l(To,"tokenizer_file"),To.forEach(o),Ye=l(H," par\xE2metro:"),H.forEach(o),ze=d(e),O(N.$$.fragment,e),ve=d(e),E=t(e,"P",{});var Ee=n(E);Ze=l(Ee,"Esse objeto pode ser utilizado com todos os m\xE9todos compartilhados pelos tokenizers dos \u{1F917} Transformers! 
V\xE1 para "),V=t(Ee,"A",{href:!0});var Eo=n(V);eo=l(Eo,"a p\xE1gina do tokenizer"),Eo.forEach(o),oo=l(Ee," para mais informa\xE7\xF5es."),Ee.forEach(o),this.h()},h(){f(h,"name","hf:doc:metadata"),f(h,"content",JSON.stringify(No)),f(_,"id","usando-os-tokenizers-do-tokenizers"),f(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(_,"href","#usando-os-tokenizers-do-tokenizers"),f(z,"class","relative group"),f(P,"href","https://huggingface.co/docs/tokenizers"),f(P,"rel","nofollow"),f($,"id","carregando-diretamente-de-um-objeto-tokenizer"),f($,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f($,"href","#carregando-diretamente-de-um-objeto-tokenizer"),f(v,"class","relative group"),f(K,"href","main_classes/tokenizer"),f(T,"id","carregando-de-um-arquivo-json"),f(T,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),f(T,"href","#carregando-de-um-arquivo-json"),f(g,"class","relative 
group"),f(V,"href","main_classes/tokenizer")},m(e,s){a(document.head,h),p(e,re,s),p(e,z,s),a(z,_),a(_,I),F(b,I,null),a(z,Pe),a(z,G),a(G,we),p(e,te,s),p(e,c,s),a(c,qe),a(c,R),a(R,ye),a(c,Ae),a(c,P),a(P,Se),a(c,Ne),p(e,ne,s),p(e,U,s),a(U,Ce),p(e,ie,s),F(w,e,s),p(e,le,s),p(e,D,s),a(D,Oe),p(e,pe,s),p(e,v,s),a(v,$),a($,Q),F(q,Q,null),a(v,Fe),a(v,X),a(X,xe),p(e,me,s),p(e,u,s),a(u,Be),a(u,Y),a(Y,Je),a(u,Ue),a(u,Z),a(Z,De),a(u,Ke),p(e,de,s),F(y,e,s),p(e,fe,s),p(e,j,s),a(j,Me),a(j,K),a(K,Ve),a(j,Le),p(e,ce,s),p(e,g,s),a(g,T),a(T,ee),F(A,ee,null),a(g,We),a(g,oe),a(oe,He),p(e,ue,s),p(e,M,s),a(M,Ie),p(e,ke,s),F(S,e,s),p(e,he,s),p(e,k,s),a(k,Ge),a(k,ae),a(ae,Re),a(k,Qe),a(k,se),a(se,Xe),a(k,Ye),p(e,ze,s),F(N,e,s),p(e,ve,s),p(e,E,s),a(E,Ze),a(E,V),a(V,eo),a(E,oo),ge=!0},p:yo,i(e){ge||(x(b.$$.fragment,e),x(w.$$.fragment,e),x(q.$$.fragment,e),x(y.$$.fragment,e),x(A.$$.fragment,e),x(S.$$.fragment,e),x(N.$$.fragment,e),ge=!0)},o(e){B(b.$$.fragment,e),B(w.$$.fragment,e),B(q.$$.fragment,e),B(y.$$.fragment,e),B(A.$$.fragment,e),B(S.$$.fragment,e),B(N.$$.fragment,e),ge=!1},d(e){o(h),e&&o(re),e&&o(z),J(b),e&&o(te),e&&o(c),e&&o(ne),e&&o(U),e&&o(ie),J(w,e),e&&o(le),e&&o(D),e&&o(pe),e&&o(v),J(q),e&&o(me),e&&o(u),e&&o(de),J(y,e),e&&o(fe),e&&o(j),e&&o(ce),e&&o(g),J(A),e&&o(ue),e&&o(M),e&&o(ke),J(S,e),e&&o(he),e&&o(k),e&&o(ze),J(N,e),e&&o(ve),e&&o(E)}}}const No={local:"usando-os-tokenizers-do-tokenizers",sections:[{local:"carregando-diretamente-de-um-objeto-tokenizer",title:"Carregando diretamente de um objeto tokenizer"},{local:"carregando-de-um-arquivo-json",title:"Carregando de um arquivo JSON"}],title:"Usando os Tokenizers do \u{1F917} Tokenizers"};function Co(so){return Ao(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Bo extends bo{constructor(h){super();Po(this,h,Co,So,wo,{})}}export{Bo as default,No as metadata};
482
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/pipeline_tutorial.mdx-hf-doc-builder.js
import{S as no,i as io,s as po,e as r,k as m,w as u,t,M as mo,c as n,d as s,m as c,a as i,x as h,h as o,b as d,N as co,G as a,g as p,y as g,q as v,o as $,B as _,v as fo}from"../chunks/vendor-hf-doc-builder.js";import{T as uo}from"../chunks/Tip-hf-doc-builder.js";import{I as ta}from"../chunks/IconCopyLink-hf-doc-builder.js";import{C as D}from"../chunks/CodeBlock-hf-doc-builder.js";function ho(oa){let f,H,x,w,M;return{c(){f=r("p"),H=t("Acesse a documenta\xE7\xE3o do "),x=r("code"),w=t("pipeline()"),M=t(" para obter uma lista completa de tarefas poss\xEDveis.")},l(k){f=n(k,"P",{});var b=i(f);H=o(b,"Acesse a documenta\xE7\xE3o do "),x=n(b,"CODE",{});var U=i(x);w=o(U,"pipeline()"),U.forEach(s),M=o(b," para obter uma lista completa de tarefas poss\xEDveis."),b.forEach(s)},m(k,b){p(k,f,b),a(f,H),a(f,x),a(x,w),a(f,M)},d(k){k&&s(f)}}}function go(oa){let f,H,x,w,M,k,b,U,Va,la,N,Wa,X,Xa,Ya,ra,P,Y,Za,Pe,es,as,ss,ye,ts,os,Z,ls,Ae,rs,ns,na,F,ia,O,I,ze,ee,is,Ce,ps,pa,q,ms,De,cs,ds,Me,fs,us,Oe,hs,gs,ma,xe,ae,vs,Se,$s,_s,ca,se,da,te,oe,xs,Le,Es,js,fa,le,ua,Ee,ks,ha,re,ga,E,ws,Te,qs,bs,He,Ps,ys,Ue,As,zs,Ne,Cs,Ds,va,ne,$a,S,R,Fe,ie,Ms,Ie,Os,_a,j,Ss,Re,Ls,Ts,pe,Hs,Us,Je,Ns,Fs,Ke,Is,Rs,xa,me,Ea,J,Js,Be,Ks,Bs,ja,ce,ka,K,Gs,Ge,Qs,Vs,wa,de,qa,L,B,Qe,fe,Ws,Ve,Xs,ba,y,Ys,We,Zs,et,Xe,at,st,Pa,A,tt,ue,ot,lt,Ye,rt,nt,ya,he,Aa,G,it,Ze,pt,mt,za,ge,Ca,T,Q,ea,ve,ct,aa,dt,Da,V,ft,sa,ut,ht,Ma,je,ke,gt,Oa,$e,Sa;return k=new ta({}),F=new uo({props:{$$slots:{default:[ho]},$$scope:{ctx:oa}}}),ee=new ta({}),se=new D({props:{code:`from transformers import pipeline generator = pipeline(task="text-generation")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>generator = pipeline(task=<span class="hljs-string">&quot;text-generation&quot;</span>)`}}),le=new D({props:{code:'generator("Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords 
in their halls of stone")',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generator(<span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>) [{<span class="hljs-string">&#x27;generated_text&#x27;</span>: <span class="hljs-string">&#x27;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Iron-priests at the door to the east, and thirteen for the Lord Kings at the end of the mountain&#x27;</span>}]`}}),re=new D({props:{code:`generator( [ "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", "Nine for Mortal Men, doomed to die, One for the Dark Lord on his dark throne", ] )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generator( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>, <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Nine for Mortal Men, doomed to die, One for the Dark Lord on his dark throne&quot;</span>, <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>)`}}),ne=new D({props:{code:`generator( "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", num_return_sequences=2, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generator( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>, <span class="hljs-meta">... </span> num_return_sequences=<span class="hljs-number">2</span>, <span class="hljs-meta">... 
</span>)`}}),ie=new ta({}),me=new D({props:{code:`from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("distilgpt2") model = AutoModelForCausalLM.from_pretrained("distilgpt2")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)`}}),ce=new D({props:{code:`from transformers import pipeline generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>generator = pipeline(task=<span class="hljs-string">&quot;text-generation&quot;</span>, model=model, tokenizer=tokenizer)`}}),de=new D({props:{code:'generator("Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone")',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generator(<span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>) [{<span class="hljs-string">&#x27;generated_text&#x27;</span>: <span class="hljs-string">&#x27;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Dragon-lords (for them to rule in a world ruled by their rulers, and all who live within the realm&#x27;</span>}]`}}),fe=new ta({}),he=new D({props:{code:`from transformers import pipeline audio_classifier = pipeline( task="audio-classification", 
model="ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>audio_classifier = pipeline( <span class="hljs-meta">... </span> task=<span class="hljs-string">&quot;audio-classification&quot;</span>, model=<span class="hljs-string">&quot;ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition&quot;</span> <span class="hljs-meta">... </span>)`}}),ge=new D({props:{code:'audio_classifier("jfk_moon_speech.wav")',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>audio_classifier(<span class="hljs-string">&quot;jfk_moon_speech.wav&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;calm&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.13856211304664612</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;disgust&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.13148026168346405</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;happy&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.12635163962841034</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;angry&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.12439591437578201</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;fearful&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.12404385954141617</span>}]`}}),ve=new ta({}),$e=new D({props:{code:`from transformers import pipeline vision_classifier = pipeline(task="image-classification") vision_classifier( 
images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>vision_classifier = pipeline(task=<span class="hljs-string">&quot;image-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>vision_classifier( <span class="hljs-meta">... </span> images=<span class="hljs-string">&quot;https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg&quot;</span> <span class="hljs-meta">... </span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;lynx, catamount&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.4403027892112732</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;cougar, puma, catamount, mountain lion, painter, panther, Felis concolor&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.03433405980467796</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;snow leopard, ounce, Panthera uncia&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.032148055732250214</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;Egyptian cat&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.02353910356760025</span>}, {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;tiger cat&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.023034192621707916</span>}]`}}),{c(){f=r("meta"),H=m(),x=r("h1"),w=r("a"),M=r("span"),u(k.$$.fragment),b=m(),U=r("span"),Va=t("Pipelines 
para infer\xEAncia"),la=m(),N=r("p"),Wa=t("Um [pipeline] simplifica o uso dos modelos no "),X=r("a"),Xa=t("Model Hub"),Ya=t(` para a infer\xEAncia de uma diversidade de tarefas, como a gera\xE7\xE3o de texto, a segmenta\xE7\xE3o de imagens e a classifica\xE7\xE3o de \xE1udio. Inclusive, se n\xE3o tem experi\xEAncia com alguma modalidade espec\xEDfica ou n\xE3o compreende o c\xF3digo que forma os modelos, pode usar eles mesmo assim com o [pipeline]! Este tutorial te ensinar\xE1 a:`),ra=m(),P=r("ul"),Y=r("li"),Za=t("Utilizar um "),Pe=r("code"),es=t("pipeline()"),as=t(" para infer\xEAncia."),ss=m(),ye=r("li"),ts=t("Utilizar um tokenizador ou model espec\xEDfico."),os=m(),Z=r("li"),ls=t("Utilizar um "),Ae=r("code"),rs=t("pipeline()"),ns=t(" para tarefas de \xE1udio e vis\xE3o computacional."),na=m(),u(F.$$.fragment),ia=m(),O=r("h2"),I=r("a"),ze=r("span"),u(ee.$$.fragment),is=m(),Ce=r("span"),ps=t("Uso do pipeline"),pa=m(),q=r("p"),ms=t("Mesmo que cada tarefa tenha um "),De=r("code"),cs=t("pipeline()"),ds=t(" associado, \xE9 mais simples usar a abstra\xE7\xE3o geral do "),Me=r("code"),fs=t("pipeline()"),us=t(` que cont\xE9m todos os pipelines das tarefas mais espec\xEDficas. O `),Oe=r("code"),hs=t("pipeline()"),gs=t(` carrega automaticamenta um modelo predeterminado e um tokenizador com capacidade de infer\xEAncia para sua tarefa.`),ma=m(),xe=r("ol"),ae=r("li"),vs=t("Comece carregando um "),Se=r("code"),$s=t("pipeline()"),_s=t(" e especifique uma tarefa de infer\xEAncia:"),ca=m(),u(se.$$.fragment),da=m(),te=r("ol"),oe=r("li"),xs=t("Passe seu dado de entrada, no caso um texto, ao "),Le=r("code"),Es=t("pipeline()"),js=t(":"),fa=m(),u(le.$$.fragment),ua=m(),Ee=r("p"),ks=t("Se tiver mais de uma entrada, passe-a como uma lista:"),ha=m(),u(re.$$.fragment),ga=m(),E=r("p"),ws=t("Qualquer par\xE2metro adicional para a sua tarefa tamb\xE9m pode ser inclu\xEDdo no "),Te=r("code"),qs=t("pipeline()"),bs=t(". 
A tarefa "),He=r("code"),Ps=t("text-generation"),ys=t(` tem um m\xE9todo `),Ue=r("code"),As=t("generate()"),zs=t(` com v\xE1rios par\xE2metros para controlar a sa\xEDda. Por exemplo, se quiser gerar mais de uma sa\xEDda, defina-a no par\xE2metro `),Ne=r("code"),Cs=t("num_return_sequences"),Ds=t(":"),va=m(),u(ne.$$.fragment),$a=m(),S=r("h3"),R=r("a"),Fe=r("span"),u(ie.$$.fragment),Ms=m(),Ie=r("span"),Os=t("Selecionando um modelo e um tokenizador"),_a=m(),j=r("p"),Ss=t("O "),Re=r("code"),Ls=t("pipeline()"),Ts=t(" aceita qualquer modelo do "),pe=r("a"),Hs=t("Model Hub"),Us=t(`. H\xE1 r\xF3tulos adicionais no Model Hub que te permitem filtrar pelo modelo que gostaria de usar para sua tarefa. Uma vez que tiver escolhido o modelo apropriado, carregue-o com as classes `),Je=r("code"),Ns=t("AutoModelFor"),Fs=t(" e [`AutoTokenizer\u2019] correspondentes. Por exemplo, carregue a classe "),Ke=r("code"),Is=t("AutoModelForCausalLM"),Rs=t(` para uma tarefa de modelagem de linguagem causal:`),xa=m(),u(me.$$.fragment),Ea=m(),J=r("p"),Js=t("Crie uma "),Be=r("code"),Ks=t("pipeline()"),Bs=t(" para a sua tarefa e especif\xEDque o modelo e o tokenizador que foram carregados:"),ja=m(),u(ce.$$.fragment),ka=m(),K=r("p"),Gs=t("Passe seu texto de entrada ao "),Ge=r("code"),Qs=t("pipeline()"),Vs=t(" para gerar algum texto:"),wa=m(),u(de.$$.fragment),qa=m(),L=r("h2"),B=r("a"),Qe=r("span"),u(fe.$$.fragment),Ws=m(),Ve=r("span"),Xs=t("Pipeline de audio"),ba=m(),y=r("p"),Ys=t("A flexibilidade do "),We=r("code"),Zs=t("pipeline()"),et=t(` significa que tamb\xE9m pode-se extender \xE0s tarefas de \xE1udio. La flexibilidad de `),Xe=r("code"),at=t("pipeline()"),st=t(" significa que tambi\xE9n se puede extender a tareas de audio."),Pa=m(),A=r("p"),tt=t(`Por exemplo, classifiquemos a emo\xE7\xE3o de um breve fragmento do famoso discurso de John F. 
Kennedy /home/rzimmerdev/dev/transformers/docs/source/pt/pipeline_tutorial.mdx Encontre um modelo de `),ue=r("a"),ot=t("audio classification"),lt=t(` para reconhecimento de emo\xE7\xF5es no Model Hub e carregue-o usando o `),Ye=r("code"),rt=t("pipeline()"),nt=t(":"),ya=m(),u(he.$$.fragment),Aa=m(),G=r("p"),it=t("Passe o arquivo de \xE1udio ao "),Ze=r("code"),pt=t("pipeline()"),mt=t(":"),za=m(),u(ge.$$.fragment),Ca=m(),T=r("h2"),Q=r("a"),ea=r("span"),u(ve.$$.fragment),ct=m(),aa=r("span"),dt=t("Pipeline de vis\xE3o computacional"),Da=m(),V=r("p"),ft=t("Finalmente, utilizar um "),sa=r("code"),ut=t("pipeline()"),ht=t(` para tarefas de vis\xE3o \xE9 praticamente a mesma coisa. Especifique a sua tarefa de vis\xE3o e passe a sua imagem ao classificador. A imagem pode ser um link ou uma rota local \xE0 imagem. Por exemplo, que esp\xE9cie de gato est\xE1 presente na imagem?`),Ma=m(),je=r("p"),ke=r("img"),Oa=m(),u($e.$$.fragment),this.h()},l(e){const l=mo('[data-svelte="svelte-1phssyn"]',document.head);f=n(l,"META",{name:!0,content:!0}),l.forEach(s),H=c(e),x=n(e,"H1",{class:!0});var _e=i(x);w=n(_e,"A",{id:!0,class:!0,href:!0});var vt=i(w);M=n(vt,"SPAN",{});var $t=i(M);h(k.$$.fragment,$t),$t.forEach(s),vt.forEach(s),b=c(_e),U=n(_e,"SPAN",{});var _t=i(U);Va=o(_t,"Pipelines para infer\xEAncia"),_t.forEach(s),_e.forEach(s),la=c(e),N=n(e,"P",{});var La=i(N);Wa=o(La,"Um [pipeline] simplifica o uso dos modelos no "),X=n(La,"A",{href:!0,rel:!0});var xt=i(X);Xa=o(xt,"Model Hub"),xt.forEach(s),Ya=o(La,` para a infer\xEAncia de uma diversidade de tarefas, como a gera\xE7\xE3o de texto, a segmenta\xE7\xE3o de imagens e a classifica\xE7\xE3o de \xE1udio. Inclusive, se n\xE3o tem experi\xEAncia com alguma modalidade espec\xEDfica ou n\xE3o compreende o c\xF3digo que forma os modelos, pode usar eles mesmo assim com o [pipeline]! 
Este tutorial te ensinar\xE1 a:`),La.forEach(s),ra=c(e),P=n(e,"UL",{});var we=i(P);Y=n(we,"LI",{});var Ta=i(Y);Za=o(Ta,"Utilizar um "),Pe=n(Ta,"CODE",{});var Et=i(Pe);es=o(Et,"pipeline()"),Et.forEach(s),as=o(Ta," para infer\xEAncia."),Ta.forEach(s),ss=c(we),ye=n(we,"LI",{});var jt=i(ye);ts=o(jt,"Utilizar um tokenizador ou model espec\xEDfico."),jt.forEach(s),os=c(we),Z=n(we,"LI",{});var Ha=i(Z);ls=o(Ha,"Utilizar um "),Ae=n(Ha,"CODE",{});var kt=i(Ae);rs=o(kt,"pipeline()"),kt.forEach(s),ns=o(Ha," para tarefas de \xE1udio e vis\xE3o computacional."),Ha.forEach(s),we.forEach(s),na=c(e),h(F.$$.fragment,e),ia=c(e),O=n(e,"H2",{class:!0});var Ua=i(O);I=n(Ua,"A",{id:!0,class:!0,href:!0});var wt=i(I);ze=n(wt,"SPAN",{});var qt=i(ze);h(ee.$$.fragment,qt),qt.forEach(s),wt.forEach(s),is=c(Ua),Ce=n(Ua,"SPAN",{});var bt=i(Ce);ps=o(bt,"Uso do pipeline"),bt.forEach(s),Ua.forEach(s),pa=c(e),q=n(e,"P",{});var W=i(q);ms=o(W,"Mesmo que cada tarefa tenha um "),De=n(W,"CODE",{});var Pt=i(De);cs=o(Pt,"pipeline()"),Pt.forEach(s),ds=o(W," associado, \xE9 mais simples usar a abstra\xE7\xE3o geral do "),Me=n(W,"CODE",{});var yt=i(Me);fs=o(yt,"pipeline()"),yt.forEach(s),us=o(W,` que cont\xE9m todos os pipelines das tarefas mais espec\xEDficas. 
O `),Oe=n(W,"CODE",{});var At=i(Oe);hs=o(At,"pipeline()"),At.forEach(s),gs=o(W,` carrega automaticamenta um modelo predeterminado e um tokenizador com capacidade de infer\xEAncia para sua tarefa.`),W.forEach(s),ma=c(e),xe=n(e,"OL",{});var zt=i(xe);ae=n(zt,"LI",{});var Na=i(ae);vs=o(Na,"Comece carregando um "),Se=n(Na,"CODE",{});var Ct=i(Se);$s=o(Ct,"pipeline()"),Ct.forEach(s),_s=o(Na," e especifique uma tarefa de infer\xEAncia:"),Na.forEach(s),zt.forEach(s),ca=c(e),h(se.$$.fragment,e),da=c(e),te=n(e,"OL",{start:!0});var Dt=i(te);oe=n(Dt,"LI",{});var Fa=i(oe);xs=o(Fa,"Passe seu dado de entrada, no caso um texto, ao "),Le=n(Fa,"CODE",{});var Mt=i(Le);Es=o(Mt,"pipeline()"),Mt.forEach(s),js=o(Fa,":"),Fa.forEach(s),Dt.forEach(s),fa=c(e),h(le.$$.fragment,e),ua=c(e),Ee=n(e,"P",{});var Ot=i(Ee);ks=o(Ot,"Se tiver mais de uma entrada, passe-a como uma lista:"),Ot.forEach(s),ha=c(e),h(re.$$.fragment,e),ga=c(e),E=n(e,"P",{});var z=i(E);ws=o(z,"Qualquer par\xE2metro adicional para a sua tarefa tamb\xE9m pode ser inclu\xEDdo no "),Te=n(z,"CODE",{});var St=i(Te);qs=o(St,"pipeline()"),St.forEach(s),bs=o(z,". A tarefa "),He=n(z,"CODE",{});var Lt=i(He);Ps=o(Lt,"text-generation"),Lt.forEach(s),ys=o(z,` tem um m\xE9todo `),Ue=n(z,"CODE",{});var Tt=i(Ue);As=o(Tt,"generate()"),Tt.forEach(s),zs=o(z,` com v\xE1rios par\xE2metros para controlar a sa\xEDda. 
Por exemplo, se quiser gerar mais de uma sa\xEDda, defina-a no par\xE2metro `),Ne=n(z,"CODE",{});var Ht=i(Ne);Cs=o(Ht,"num_return_sequences"),Ht.forEach(s),Ds=o(z,":"),z.forEach(s),va=c(e),h(ne.$$.fragment,e),$a=c(e),S=n(e,"H3",{class:!0});var Ia=i(S);R=n(Ia,"A",{id:!0,class:!0,href:!0});var Ut=i(R);Fe=n(Ut,"SPAN",{});var Nt=i(Fe);h(ie.$$.fragment,Nt),Nt.forEach(s),Ut.forEach(s),Ms=c(Ia),Ie=n(Ia,"SPAN",{});var Ft=i(Ie);Os=o(Ft,"Selecionando um modelo e um tokenizador"),Ft.forEach(s),Ia.forEach(s),_a=c(e),j=n(e,"P",{});var C=i(j);Ss=o(C,"O "),Re=n(C,"CODE",{});var It=i(Re);Ls=o(It,"pipeline()"),It.forEach(s),Ts=o(C," aceita qualquer modelo do "),pe=n(C,"A",{href:!0,rel:!0});var Rt=i(pe);Hs=o(Rt,"Model Hub"),Rt.forEach(s),Us=o(C,`. H\xE1 r\xF3tulos adicionais no Model Hub que te permitem filtrar pelo modelo que gostaria de usar para sua tarefa. Uma vez que tiver escolhido o modelo apropriado, carregue-o com as classes `),Je=n(C,"CODE",{});var Jt=i(Je);Ns=o(Jt,"AutoModelFor"),Jt.forEach(s),Fs=o(C," e [`AutoTokenizer\u2019] correspondentes. 
Por exemplo, carregue a classe "),Ke=n(C,"CODE",{});var Kt=i(Ke);Is=o(Kt,"AutoModelForCausalLM"),Kt.forEach(s),Rs=o(C,` para uma tarefa de modelagem de linguagem causal:`),C.forEach(s),xa=c(e),h(me.$$.fragment,e),Ea=c(e),J=n(e,"P",{});var Ra=i(J);Js=o(Ra,"Crie uma "),Be=n(Ra,"CODE",{});var Bt=i(Be);Ks=o(Bt,"pipeline()"),Bt.forEach(s),Bs=o(Ra," para a sua tarefa e especif\xEDque o modelo e o tokenizador que foram carregados:"),Ra.forEach(s),ja=c(e),h(ce.$$.fragment,e),ka=c(e),K=n(e,"P",{});var Ja=i(K);Gs=o(Ja,"Passe seu texto de entrada ao "),Ge=n(Ja,"CODE",{});var Gt=i(Ge);Qs=o(Gt,"pipeline()"),Gt.forEach(s),Vs=o(Ja," para gerar algum texto:"),Ja.forEach(s),wa=c(e),h(de.$$.fragment,e),qa=c(e),L=n(e,"H2",{class:!0});var Ka=i(L);B=n(Ka,"A",{id:!0,class:!0,href:!0});var Qt=i(B);Qe=n(Qt,"SPAN",{});var Vt=i(Qe);h(fe.$$.fragment,Vt),Vt.forEach(s),Qt.forEach(s),Ws=c(Ka),Ve=n(Ka,"SPAN",{});var Wt=i(Ve);Xs=o(Wt,"Pipeline de audio"),Wt.forEach(s),Ka.forEach(s),ba=c(e),y=n(e,"P",{});var qe=i(y);Ys=o(qe,"A flexibilidade do "),We=n(qe,"CODE",{});var Xt=i(We);Zs=o(Xt,"pipeline()"),Xt.forEach(s),et=o(qe,` significa que tamb\xE9m pode-se extender \xE0s tarefas de \xE1udio. La flexibilidad de `),Xe=n(qe,"CODE",{});var Yt=i(Xe);at=o(Yt,"pipeline()"),Yt.forEach(s),st=o(qe," significa que tambi\xE9n se puede extender a tareas de audio."),qe.forEach(s),Pa=c(e),A=n(e,"P",{});var be=i(A);tt=o(be,`Por exemplo, classifiquemos a emo\xE7\xE3o de um breve fragmento do famoso discurso de John F. 
Kennedy /home/rzimmerdev/dev/transformers/docs/source/pt/pipeline_tutorial.mdx Encontre um modelo de `),ue=n(be,"A",{href:!0,rel:!0});var Zt=i(ue);ot=o(Zt,"audio classification"),Zt.forEach(s),lt=o(be,` para reconhecimento de emo\xE7\xF5es no Model Hub e carregue-o usando o `),Ye=n(be,"CODE",{});var eo=i(Ye);rt=o(eo,"pipeline()"),eo.forEach(s),nt=o(be,":"),be.forEach(s),ya=c(e),h(he.$$.fragment,e),Aa=c(e),G=n(e,"P",{});var Ba=i(G);it=o(Ba,"Passe o arquivo de \xE1udio ao "),Ze=n(Ba,"CODE",{});var ao=i(Ze);pt=o(ao,"pipeline()"),ao.forEach(s),mt=o(Ba,":"),Ba.forEach(s),za=c(e),h(ge.$$.fragment,e),Ca=c(e),T=n(e,"H2",{class:!0});var Ga=i(T);Q=n(Ga,"A",{id:!0,class:!0,href:!0});var so=i(Q);ea=n(so,"SPAN",{});var to=i(ea);h(ve.$$.fragment,to),to.forEach(s),so.forEach(s),ct=c(Ga),aa=n(Ga,"SPAN",{});var oo=i(aa);dt=o(oo,"Pipeline de vis\xE3o computacional"),oo.forEach(s),Ga.forEach(s),Da=c(e),V=n(e,"P",{});var Qa=i(V);ft=o(Qa,"Finalmente, utilizar um "),sa=n(Qa,"CODE",{});var lo=i(sa);ut=o(lo,"pipeline()"),lo.forEach(s),ht=o(Qa,` para tarefas de vis\xE3o \xE9 praticamente a mesma coisa. Especifique a sua tarefa de vis\xE3o e passe a sua imagem ao classificador. A imagem pode ser um link ou uma rota local \xE0 imagem. 
Por exemplo, que esp\xE9cie de gato est\xE1 presente na imagem?`),Qa.forEach(s),Ma=c(e),je=n(e,"P",{});var ro=i(je);ke=n(ro,"IMG",{src:!0,alt:!0}),ro.forEach(s),Oa=c(e),h($e.$$.fragment,e),this.h()},h(){d(f,"name","hf:doc:metadata"),d(f,"content",JSON.stringify(vo)),d(w,"id","pipelines-para-inferncia"),d(w,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(w,"href","#pipelines-para-inferncia"),d(x,"class","relative group"),d(X,"href","https://huggingface.co/models"),d(X,"rel","nofollow"),d(I,"id","uso-do-pipeline"),d(I,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(I,"href","#uso-do-pipeline"),d(O,"class","relative group"),d(te,"start","2"),d(R,"id","selecionando-um-modelo-e-um-tokenizador"),d(R,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(R,"href","#selecionando-um-modelo-e-um-tokenizador"),d(S,"class","relative group"),d(pe,"href","https://huggingface.co/models"),d(pe,"rel","nofollow"),d(B,"id","pipeline-de-audio"),d(B,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(B,"href","#pipeline-de-audio"),d(L,"class","relative group"),d(ue,"href","https://huggingface.co/models?pipeline_tag=audio-classification"),d(ue,"rel","nofollow"),d(Q,"id","pipeline-de-viso-computacional"),d(Q,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Q,"href","#pipeline-de-viso-computacional"),d(T,"class","relative 
group"),co(ke.src,gt="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg")||d(ke,"src",gt),d(ke,"alt","pipeline-cat-chonk")},m(e,l){a(document.head,f),p(e,H,l),p(e,x,l),a(x,w),a(w,M),g(k,M,null),a(x,b),a(x,U),a(U,Va),p(e,la,l),p(e,N,l),a(N,Wa),a(N,X),a(X,Xa),a(N,Ya),p(e,ra,l),p(e,P,l),a(P,Y),a(Y,Za),a(Y,Pe),a(Pe,es),a(Y,as),a(P,ss),a(P,ye),a(ye,ts),a(P,os),a(P,Z),a(Z,ls),a(Z,Ae),a(Ae,rs),a(Z,ns),p(e,na,l),g(F,e,l),p(e,ia,l),p(e,O,l),a(O,I),a(I,ze),g(ee,ze,null),a(O,is),a(O,Ce),a(Ce,ps),p(e,pa,l),p(e,q,l),a(q,ms),a(q,De),a(De,cs),a(q,ds),a(q,Me),a(Me,fs),a(q,us),a(q,Oe),a(Oe,hs),a(q,gs),p(e,ma,l),p(e,xe,l),a(xe,ae),a(ae,vs),a(ae,Se),a(Se,$s),a(ae,_s),p(e,ca,l),g(se,e,l),p(e,da,l),p(e,te,l),a(te,oe),a(oe,xs),a(oe,Le),a(Le,Es),a(oe,js),p(e,fa,l),g(le,e,l),p(e,ua,l),p(e,Ee,l),a(Ee,ks),p(e,ha,l),g(re,e,l),p(e,ga,l),p(e,E,l),a(E,ws),a(E,Te),a(Te,qs),a(E,bs),a(E,He),a(He,Ps),a(E,ys),a(E,Ue),a(Ue,As),a(E,zs),a(E,Ne),a(Ne,Cs),a(E,Ds),p(e,va,l),g(ne,e,l),p(e,$a,l),p(e,S,l),a(S,R),a(R,Fe),g(ie,Fe,null),a(S,Ms),a(S,Ie),a(Ie,Os),p(e,_a,l),p(e,j,l),a(j,Ss),a(j,Re),a(Re,Ls),a(j,Ts),a(j,pe),a(pe,Hs),a(j,Us),a(j,Je),a(Je,Ns),a(j,Fs),a(j,Ke),a(Ke,Is),a(j,Rs),p(e,xa,l),g(me,e,l),p(e,Ea,l),p(e,J,l),a(J,Js),a(J,Be),a(Be,Ks),a(J,Bs),p(e,ja,l),g(ce,e,l),p(e,ka,l),p(e,K,l),a(K,Gs),a(K,Ge),a(Ge,Qs),a(K,Vs),p(e,wa,l),g(de,e,l),p(e,qa,l),p(e,L,l),a(L,B),a(B,Qe),g(fe,Qe,null),a(L,Ws),a(L,Ve),a(Ve,Xs),p(e,ba,l),p(e,y,l),a(y,Ys),a(y,We),a(We,Zs),a(y,et),a(y,Xe),a(Xe,at),a(y,st),p(e,Pa,l),p(e,A,l),a(A,tt),a(A,ue),a(ue,ot),a(A,lt),a(A,Ye),a(Ye,rt),a(A,nt),p(e,ya,l),g(he,e,l),p(e,Aa,l),p(e,G,l),a(G,it),a(G,Ze),a(Ze,pt),a(G,mt),p(e,za,l),g(ge,e,l),p(e,Ca,l),p(e,T,l),a(T,Q),a(Q,ea),g(ve,ea,null),a(T,ct),a(T,aa),a(aa,dt),p(e,Da,l),p(e,V,l),a(V,ft),a(V,sa),a(sa,ut),a(V,ht),p(e,Ma,l),p(e,je,l),a(je,ke),p(e,Oa,l),g($e,e,l),Sa=!0},p(e,[l]){const 
_e={};l&2&&(_e.$$scope={dirty:l,ctx:e}),F.$set(_e)},i(e){Sa||(v(k.$$.fragment,e),v(F.$$.fragment,e),v(ee.$$.fragment,e),v(se.$$.fragment,e),v(le.$$.fragment,e),v(re.$$.fragment,e),v(ne.$$.fragment,e),v(ie.$$.fragment,e),v(me.$$.fragment,e),v(ce.$$.fragment,e),v(de.$$.fragment,e),v(fe.$$.fragment,e),v(he.$$.fragment,e),v(ge.$$.fragment,e),v(ve.$$.fragment,e),v($e.$$.fragment,e),Sa=!0)},o(e){$(k.$$.fragment,e),$(F.$$.fragment,e),$(ee.$$.fragment,e),$(se.$$.fragment,e),$(le.$$.fragment,e),$(re.$$.fragment,e),$(ne.$$.fragment,e),$(ie.$$.fragment,e),$(me.$$.fragment,e),$(ce.$$.fragment,e),$(de.$$.fragment,e),$(fe.$$.fragment,e),$(he.$$.fragment,e),$(ge.$$.fragment,e),$(ve.$$.fragment,e),$($e.$$.fragment,e),Sa=!1},d(e){s(f),e&&s(H),e&&s(x),_(k),e&&s(la),e&&s(N),e&&s(ra),e&&s(P),e&&s(na),_(F,e),e&&s(ia),e&&s(O),_(ee),e&&s(pa),e&&s(q),e&&s(ma),e&&s(xe),e&&s(ca),_(se,e),e&&s(da),e&&s(te),e&&s(fa),_(le,e),e&&s(ua),e&&s(Ee),e&&s(ha),_(re,e),e&&s(ga),e&&s(E),e&&s(va),_(ne,e),e&&s($a),e&&s(S),_(ie),e&&s(_a),e&&s(j),e&&s(xa),_(me,e),e&&s(Ea),e&&s(J),e&&s(ja),_(ce,e),e&&s(ka),e&&s(K),e&&s(wa),_(de,e),e&&s(qa),e&&s(L),_(fe),e&&s(ba),e&&s(y),e&&s(Pa),e&&s(A),e&&s(ya),_(he,e),e&&s(Aa),e&&s(G),e&&s(za),_(ge,e),e&&s(Ca),e&&s(T),_(ve),e&&s(Da),e&&s(V),e&&s(Ma),e&&s(je),e&&s(Oa),_($e,e)}}}const vo={local:"pipelines-para-inferncia",sections:[{local:"uso-do-pipeline",sections:[{local:"selecionando-um-modelo-e-um-tokenizador",title:"Selecionando um modelo e um tokenizador"}],title:"Uso do pipeline"},{local:"pipeline-de-audio",title:"Pipeline de audio"},{local:"pipeline-de-viso-computacional",title:"Pipeline de vis\xE3o computacional"}],title:"Pipelines para infer\xEAncia"};function $o(oa){return fo(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ko extends no{constructor(f){super();io(this,f,$o,go,po,{})}}export{ko as default,vo as metadata};
483
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/create_a_model.mdx-hf-doc-builder.js
import{S as wn,i as zn,s as xn,e as r,k as c,w as j,t as o,M as Dn,c as i,d as s,m as f,a as n,x as k,h as t,b,G as a,g as p,y as w,q as z,o as x,B as D,v as Cn,L as Qo}from"../chunks/vendor-hf-doc-builder.js";import{T as Lo}from"../chunks/Tip-hf-doc-builder.js";import{I as da}from"../chunks/IconCopyLink-hf-doc-builder.js";import{C as I}from"../chunks/CodeBlock-hf-doc-builder.js";import{F as kn,M as Uo}from"../chunks/Markdown-hf-doc-builder.js";function yn(S){let m,g,u,_,q;return{c(){m=r("p"),g=o("Voc\xEA pode tamb\xE9m salvar seu arquivo de configura\xE7\xF5es como um dicion\xE1rio ou at\xE9 mesmo com a diferen\xE7a entre as seus atributos de configura\xE7\xE3o customizados e os atributos de configura\xE7\xE3o padr\xF5es! Olhe a documenta\xE7\xE3o "),u=r("a"),_=o("configuration"),q=o(" para mais detalhes."),this.h()},l(v){m=i(v,"P",{});var $=n(m);g=t($,"Voc\xEA pode tamb\xE9m salvar seu arquivo de configura\xE7\xF5es como um dicion\xE1rio ou at\xE9 mesmo com a diferen\xE7a entre as seus atributos de configura\xE7\xE3o customizados e os atributos de configura\xE7\xE3o padr\xF5es! 
Olhe a documenta\xE7\xE3o "),u=i($,"A",{href:!0});var y=n(u);_=t(y,"configuration"),y.forEach(s),q=t($," para mais detalhes."),$.forEach(s),this.h()},h(){b(u,"href","main_classes/configuration")},m(v,$){p(v,m,$),a(m,g),a(m,u),a(u,_),a(m,q)},d(v){v&&s(m)}}}function Tn(S){let m,g,u,_,q,v,$,y,E,N,C,M,B,O,T,V,h,F,R,A,L;return _=new I({props:{code:`from transformers import DistilBertModel my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") model = DistilBertModel(my_config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertModel <span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span class="hljs-string">&quot;./your_model_save_path/my_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertModel(my_config)`}}),T=new I({props:{code:'model = DistilBertModel.from_pretrained("distilbert-base-uncased")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertModel.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)'}}),A=new I({props:{code:'model = DistilBertModel.from_pretrained("distilbert-base-uncased", config=my_config)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertModel.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, config=my_config)'}}),{c(){m=r("p"),g=o("Carregar seus atributos de configura\xE7\xE3o customizados em um modelo:"),u=c(),j(_.$$.fragment),q=c(),v=r("p"),$=o("Isso cria um modelo com valores aleat\xF3rios ao inv\xE9s de pr\xE9-treinar os pesos. Voc\xEA n\xE3o ir\xE1 conseguir usar usar esse modelo para nada \xFAtil ainda, at\xE9 voc\xEA treinar ele. Treino \xE9 um processo caro e demorado. 
Geralmente \xE9 melhor utilizar um modelo pr\xE9-treinado para obter melhores resultados mais r\xE1pido, enquanto usa apenas uma fra\xE7\xE3o dos recursos necess\xE1rios para treinar."),y=c(),E=r("p"),N=o("Criar um modelo pr\xE9-treinado com "),C=r("code"),M=o("from_pretrained()"),B=o(":"),O=c(),j(T.$$.fragment),V=c(),h=r("p"),F=o("Quando voc\xEA carregar os pesos pr\xE9-treinados, a configura\xE7\xE3o padr\xE3o do modelo \xE9 automaticamente carregada se o modelo \xE9 provido pelo \u{1F917} Transformers. No entanto, voc\xEA ainda consegue mudar - alguns ou todos - os atributos padr\xF5es de configura\xE7\xE3o do modelo com os seus pr\xF3prio atributos, se voc\xEA preferir:"),R=c(),j(A.$$.fragment)},l(d){m=i(d,"P",{});var P=n(m);g=t(P,"Carregar seus atributos de configura\xE7\xE3o customizados em um modelo:"),P.forEach(s),u=f(d),k(_.$$.fragment,d),q=f(d),v=i(d,"P",{});var W=n(v);$=t(W,"Isso cria um modelo com valores aleat\xF3rios ao inv\xE9s de pr\xE9-treinar os pesos. Voc\xEA n\xE3o ir\xE1 conseguir usar usar esse modelo para nada \xFAtil ainda, at\xE9 voc\xEA treinar ele. Treino \xE9 um processo caro e demorado. Geralmente \xE9 melhor utilizar um modelo pr\xE9-treinado para obter melhores resultados mais r\xE1pido, enquanto usa apenas uma fra\xE7\xE3o dos recursos necess\xE1rios para treinar."),W.forEach(s),y=f(d),E=i(d,"P",{});var H=n(E);N=t(H,"Criar um modelo pr\xE9-treinado com "),C=i(H,"CODE",{});var se=n(C);M=t(se,"from_pretrained()"),se.forEach(s),B=t(H,":"),H.forEach(s),O=f(d),k(T.$$.fragment,d),V=f(d),h=i(d,"P",{});var oe=n(h);F=t(oe,"Quando voc\xEA carregar os pesos pr\xE9-treinados, a configura\xE7\xE3o padr\xE3o do modelo \xE9 automaticamente carregada se o modelo \xE9 provido pelo \u{1F917} Transformers. 
No entanto, voc\xEA ainda consegue mudar - alguns ou todos - os atributos padr\xF5es de configura\xE7\xE3o do modelo com os seus pr\xF3prio atributos, se voc\xEA preferir:"),oe.forEach(s),R=f(d),k(A.$$.fragment,d)},m(d,P){p(d,m,P),a(m,g),p(d,u,P),w(_,d,P),p(d,q,P),p(d,v,P),a(v,$),p(d,y,P),p(d,E,P),a(E,N),a(E,C),a(C,M),a(E,B),p(d,O,P),w(T,d,P),p(d,V,P),p(d,h,P),a(h,F),p(d,R,P),w(A,d,P),L=!0},p:Qo,i(d){L||(z(_.$$.fragment,d),z(T.$$.fragment,d),z(A.$$.fragment,d),L=!0)},o(d){x(_.$$.fragment,d),x(T.$$.fragment,d),x(A.$$.fragment,d),L=!1},d(d){d&&s(m),d&&s(u),D(_,d),d&&s(q),d&&s(v),d&&s(y),d&&s(E),d&&s(O),D(T,d),d&&s(V),d&&s(h),d&&s(R),D(A,d)}}}function Pn(S){let m,g;return m=new Uo({props:{$$slots:{default:[Tn]},$$scope:{ctx:S}}}),{c(){j(m.$$.fragment)},l(u){k(m.$$.fragment,u)},m(u,_){w(m,u,_),g=!0},p(u,_){const q={};_&2&&(q.$$scope={dirty:_,ctx:u}),m.$set(q)},i(u){g||(z(m.$$.fragment,u),g=!0)},o(u){x(m.$$.fragment,u),g=!1},d(u){D(m,u)}}}function Fn(S){let m,g,u,_,q,v,$,y,E,N,C,M,B,O,T,V,h,F,R,A,L;return _=new I({props:{code:`from transformers import TFDistilBertModel my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") tf_model = TFDistilBertModel(my_config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDistilBertModel <span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span class="hljs-string">&quot;./your_model_save_path/my_config.json&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertModel(my_config)`}}),T=new I({props:{code:'tf_model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertModel.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)'}}),A=new I({props:{code:'tf_model = 
TFDistilBertModel.from_pretrained("distilbert-base-uncased", config=my_config)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertModel.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, config=my_config)'}}),{c(){m=r("p"),g=o("Carregar os seus pr\xF3prios atributos padr\xF5es de contigura\xE7\xE3o no modelo:"),u=c(),j(_.$$.fragment),q=c(),v=r("p"),$=o("Isso cria um modelo com valores aleat\xF3rios ao inv\xE9s de pr\xE9-treinar os pesos. Voc\xEA n\xE3o ir\xE1 conseguir usar usar esse modelo para nada \xFAtil ainda, at\xE9 voc\xEA treinar ele. Treino \xE9 um processo caro e demorado. Geralmente \xE9 melhor utilizar um modelo pr\xE9-treinado para obter melhores resultados mais r\xE1pido, enquanto usa apenas uma fra\xE7\xE3o dos recursos necess\xE1rios para treinar."),y=c(),E=r("p"),N=o("Criar um modelo pr\xE9-treinado com "),C=r("code"),M=o("from_pretrained()"),B=o(":"),O=c(),j(T.$$.fragment),V=c(),h=r("p"),F=o("Quando voc\xEA carregar os pesos pr\xE9-treinados, a configura\xE7\xE3o padr\xE3o do modelo \xE9 automaticamente carregada se o modelo \xE9 provido pelo \u{1F917} Transformers. No entanto, voc\xEA ainda consegue mudar - alguns ou todos - os atributos padr\xF5es de configura\xE7\xE3o do modelo com os seus pr\xF3prio atributos, se voc\xEA preferir:"),R=c(),j(A.$$.fragment)},l(d){m=i(d,"P",{});var P=n(m);g=t(P,"Carregar os seus pr\xF3prios atributos padr\xF5es de contigura\xE7\xE3o no modelo:"),P.forEach(s),u=f(d),k(_.$$.fragment,d),q=f(d),v=i(d,"P",{});var W=n(v);$=t(W,"Isso cria um modelo com valores aleat\xF3rios ao inv\xE9s de pr\xE9-treinar os pesos. Voc\xEA n\xE3o ir\xE1 conseguir usar usar esse modelo para nada \xFAtil ainda, at\xE9 voc\xEA treinar ele. Treino \xE9 um processo caro e demorado. 
Geralmente \xE9 melhor utilizar um modelo pr\xE9-treinado para obter melhores resultados mais r\xE1pido, enquanto usa apenas uma fra\xE7\xE3o dos recursos necess\xE1rios para treinar."),W.forEach(s),y=f(d),E=i(d,"P",{});var H=n(E);N=t(H,"Criar um modelo pr\xE9-treinado com "),C=i(H,"CODE",{});var se=n(C);M=t(se,"from_pretrained()"),se.forEach(s),B=t(H,":"),H.forEach(s),O=f(d),k(T.$$.fragment,d),V=f(d),h=i(d,"P",{});var oe=n(h);F=t(oe,"Quando voc\xEA carregar os pesos pr\xE9-treinados, a configura\xE7\xE3o padr\xE3o do modelo \xE9 automaticamente carregada se o modelo \xE9 provido pelo \u{1F917} Transformers. No entanto, voc\xEA ainda consegue mudar - alguns ou todos - os atributos padr\xF5es de configura\xE7\xE3o do modelo com os seus pr\xF3prio atributos, se voc\xEA preferir:"),oe.forEach(s),R=f(d),k(A.$$.fragment,d)},m(d,P){p(d,m,P),a(m,g),p(d,u,P),w(_,d,P),p(d,q,P),p(d,v,P),a(v,$),p(d,y,P),p(d,E,P),a(E,N),a(E,C),a(C,M),a(E,B),p(d,O,P),w(T,d,P),p(d,V,P),p(d,h,P),a(h,F),p(d,R,P),w(A,d,P),L=!0},p:Qo,i(d){L||(z(_.$$.fragment,d),z(T.$$.fragment,d),z(A.$$.fragment,d),L=!0)},o(d){x(_.$$.fragment,d),x(T.$$.fragment,d),x(A.$$.fragment,d),L=!1},d(d){d&&s(m),d&&s(u),D(_,d),d&&s(q),d&&s(v),d&&s(y),d&&s(E),d&&s(O),D(T,d),d&&s(V),d&&s(h),d&&s(R),D(A,d)}}}function Bn(S){let m,g;return m=new Uo({props:{$$slots:{default:[Fn]},$$scope:{ctx:S}}}),{c(){j(m.$$.fragment)},l(u){k(m.$$.fragment,u)},m(u,_){w(m,u,_),g=!0},p(u,_){const q={};_&2&&(q.$$scope={dirty:_,ctx:u}),m.$set(q)},i(u){g||(z(m.$$.fragment,u),g=!0)},o(u){x(m.$$.fragment,u),g=!1},d(u){D(m,u)}}}function An(S){let m,g,u,_,q,v,$,y,E,N,C,M,B,O,T,V;return $=new I({props:{code:`from transformers import DistilBertForSequenceClassification model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertForSequenceClassification <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),T=new I({props:{code:`from transformers import DistilBertForQuestionAnswering model = DistilBertForQuestionAnswering.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>model = DistilBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),{c(){m=r("p"),g=o("Por exemplo, "),u=r("code"),_=o("DistilBertForSequenceClassification"),q=o(" \xE9 um modelo DistilBERT base com uma head de classifica\xE7\xE3o de sequ\xEAncia. A head de calssifica\xE7\xE3o de sequ\xEAncia \xE9 uma camada linear no topo das sa\xEDdas agrupadas."),v=c(),j($.$$.fragment),y=c(),E=r("p"),N=o("Reutilize facilmente esse ponto de parada para outra tarefe mudando para uma head de modelo diferente. Para uma tarefe de responder quest\xF5es, voc\xEA usaria a head do modelo "),C=r("code"),M=o("DistilBertForQuestionAnswering"),B=o(". A head de responder quest\xF5es \xE9 similar com a de classifica\xE7\xE3o de sequ\xEAncias exceto o fato de que ela \xE9 uma camada no topo dos estados das sa\xEDdas ocultas."),O=c(),j(T.$$.fragment)},l(h){m=i(h,"P",{});var F=n(m);g=t(F,"Por exemplo, "),u=i(F,"CODE",{});var R=n(u);_=t(R,"DistilBertForSequenceClassification"),R.forEach(s),q=t(F," \xE9 um modelo DistilBERT base com uma head de classifica\xE7\xE3o de sequ\xEAncia. A head de calssifica\xE7\xE3o de sequ\xEAncia \xE9 uma camada linear no topo das sa\xEDdas agrupadas."),F.forEach(s),v=f(h),k($.$$.fragment,h),y=f(h),E=i(h,"P",{});var A=n(E);N=t(A,"Reutilize facilmente esse ponto de parada para outra tarefe mudando para uma head de modelo diferente. 
Para uma tarefe de responder quest\xF5es, voc\xEA usaria a head do modelo "),C=i(A,"CODE",{});var L=n(C);M=t(L,"DistilBertForQuestionAnswering"),L.forEach(s),B=t(A,". A head de responder quest\xF5es \xE9 similar com a de classifica\xE7\xE3o de sequ\xEAncias exceto o fato de que ela \xE9 uma camada no topo dos estados das sa\xEDdas ocultas."),A.forEach(s),O=f(h),k(T.$$.fragment,h)},m(h,F){p(h,m,F),a(m,g),a(m,u),a(u,_),a(m,q),p(h,v,F),w($,h,F),p(h,y,F),p(h,E,F),a(E,N),a(E,C),a(C,M),a(E,B),p(h,O,F),w(T,h,F),V=!0},p:Qo,i(h){V||(z($.$$.fragment,h),z(T.$$.fragment,h),V=!0)},o(h){x($.$$.fragment,h),x(T.$$.fragment,h),V=!1},d(h){h&&s(m),h&&s(v),D($,h),h&&s(y),h&&s(E),h&&s(O),D(T,h)}}}function On(S){let m,g;return m=new Uo({props:{$$slots:{default:[An]},$$scope:{ctx:S}}}),{c(){j(m.$$.fragment)},l(u){k(m.$$.fragment,u)},m(u,_){w(m,u,_),g=!0},p(u,_){const q={};_&2&&(q.$$scope={dirty:_,ctx:u}),m.$set(q)},i(u){g||(z(m.$$.fragment,u),g=!0)},o(u){x(m.$$.fragment,u),g=!1},d(u){D(m,u)}}}function Vn(S){let m,g,u,_,q,v,$,y,E,N,C,M,B,O,T,V;return $=new I({props:{code:`from transformers import TFDistilBertForSequenceClassification tf_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDistilBertForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),T=new I({props:{code:`from transformers import TFDistilBertForQuestionAnswering tf_model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFDistilBertForQuestionAnswering <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = 
TFDistilBertForQuestionAnswering.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),{c(){m=r("p"),g=o("Por exemplo, "),u=r("code"),_=o("TFDistilBertForSequenceClassification"),q=o(" \xE9 um modelo DistilBERT base com uma head de classifica\xE7\xE3o de sequ\xEAncia. A head de calssifica\xE7\xE3o de sequ\xEAncia \xE9 uma camada linear no topo das sa\xEDdas agrupadas."),v=c(),j($.$$.fragment),y=c(),E=r("p"),N=o("Reutilize facilmente esse ponto de parada para outra tarefe mudando para uma head de modelo diferente. Para uma tarefe de responder quest\xF5es, voc\xEA usaria a head do modelo "),C=r("code"),M=o("TFDistilBertForQuestionAnswering"),B=o(". A head de responder quest\xF5es \xE9 similar com a de classifica\xE7\xE3o de sequ\xEAncias exceto o fato de que ela \xE9 uma camada no topo dos estados das sa\xEDdas ocultas."),O=c(),j(T.$$.fragment)},l(h){m=i(h,"P",{});var F=n(m);g=t(F,"Por exemplo, "),u=i(F,"CODE",{});var R=n(u);_=t(R,"TFDistilBertForSequenceClassification"),R.forEach(s),q=t(F," \xE9 um modelo DistilBERT base com uma head de classifica\xE7\xE3o de sequ\xEAncia. A head de calssifica\xE7\xE3o de sequ\xEAncia \xE9 uma camada linear no topo das sa\xEDdas agrupadas."),F.forEach(s),v=f(h),k($.$$.fragment,h),y=f(h),E=i(h,"P",{});var A=n(E);N=t(A,"Reutilize facilmente esse ponto de parada para outra tarefe mudando para uma head de modelo diferente. Para uma tarefe de responder quest\xF5es, voc\xEA usaria a head do modelo "),C=i(A,"CODE",{});var L=n(C);M=t(L,"TFDistilBertForQuestionAnswering"),L.forEach(s),B=t(A,". 
A head de responder quest\xF5es \xE9 similar com a de classifica\xE7\xE3o de sequ\xEAncias exceto o fato de que ela \xE9 uma camada no topo dos estados das sa\xEDdas ocultas."),A.forEach(s),O=f(h),k(T.$$.fragment,h)},m(h,F){p(h,m,F),a(m,g),a(m,u),a(u,_),a(m,q),p(h,v,F),w($,h,F),p(h,y,F),p(h,E,F),a(E,N),a(E,C),a(C,M),a(E,B),p(h,O,F),w(T,h,F),V=!0},p:Qo,i(h){V||(z($.$$.fragment,h),z(T.$$.fragment,h),V=!0)},o(h){x($.$$.fragment,h),x(T.$$.fragment,h),V=!1},d(h){h&&s(m),h&&s(v),D($,h),h&&s(y),h&&s(E),h&&s(O),D(T,h)}}}function Mn(S){let m,g;return m=new Uo({props:{$$slots:{default:[Vn]},$$scope:{ctx:S}}}),{c(){j(m.$$.fragment)},l(u){k(m.$$.fragment,u)},m(u,_){w(m,u,_),g=!0},p(u,_){const q={};_&2&&(q.$$scope={dirty:_,ctx:u}),m.$set(q)},i(u){g||(z(m.$$.fragment,u),g=!0)},o(u){x(m.$$.fragment,u),g=!1},d(u){D(m,u)}}}function Sn(S){let m,g,u,_,q;return{c(){m=r("p"),g=o("Nem todo modelo suporta um \u2018fast tokenizer\u2019. De uma olhada aqui "),u=r("a"),_=o("table"),q=o(" pra checar se um modelo suporta \u2018fast tokenizer\u2019."),this.h()},l(v){m=i(v,"P",{});var $=n(m);g=t($,"Nem todo modelo suporta um \u2018fast tokenizer\u2019. De uma olhada aqui "),u=i($,"A",{href:!0});var y=n(u);_=t(y,"table"),y.forEach(s),q=t($," pra checar se um modelo suporta \u2018fast tokenizer\u2019."),$.forEach(s),this.h()},h(){b(u,"href","index#supported-frameworks")},m(v,$){p(v,m,$),a(m,g),a(m,u),a(u,_),a(m,q)},d(v){v&&s(m)}}}function Nn(S){let m,g,u,_,q,v,$,y,E,N,C;return{c(){m=r("p"),g=o("Pos padr\xE3o, "),u=r("code"),_=o("AutoTokenizer"),q=o(" tentar\xE1 carregar um \u2018fast tokenizer\u2019. Voc\xEA pode disabilitar esse comportamento colocando "),v=r("code"),$=o("use_fast=False"),y=o(" no "),E=r("code"),N=o("from_pretrained"),C=o(".")},l(M){m=i(M,"P",{});var B=n(m);g=t(B,"Pos padr\xE3o, "),u=i(B,"CODE",{});var O=n(u);_=t(O,"AutoTokenizer"),O.forEach(s),q=t(B," tentar\xE1 carregar um \u2018fast tokenizer\u2019. 
Voc\xEA pode disabilitar esse comportamento colocando "),v=i(B,"CODE",{});var T=n(v);$=t(T,"use_fast=False"),T.forEach(s),y=t(B," no "),E=i(B,"CODE",{});var V=n(E);N=t(V,"from_pretrained"),V.forEach(s),C=t(B,"."),B.forEach(s)},m(M,B){p(M,m,B),a(m,g),a(m,u),a(u,_),a(m,q),a(m,v),a(v,$),a(m,y),a(m,E),a(E,N),a(m,C)},d(M){M&&s(m)}}}function In(S){let m,g,u,_,q;return{c(){m=r("p"),g=o("Se voc\xEA n\xE3o estiver procurando por nenhuma customiza\xE7\xE3o, apenas use o m\xE9todo "),u=r("code"),_=o("from_pretrained"),q=o(" para carregar par\xE2metros do modelo de extrator de features padr\xE3o.")},l(v){m=i(v,"P",{});var $=n(m);g=t($,"Se voc\xEA n\xE3o estiver procurando por nenhuma customiza\xE7\xE3o, apenas use o m\xE9todo "),u=i($,"CODE",{});var y=n(u);_=t(y,"from_pretrained"),y.forEach(s),q=t($," para carregar par\xE2metros do modelo de extrator de features padr\xE3o."),$.forEach(s)},m(v,$){p(v,m,$),a(m,g),a(m,u),a(u,_),a(m,q)},d(v){v&&s(m)}}}function Wn(S){let m,g,u,_,q,v,$,y,E,N,C,M,B,O,T,V,h,F,R,A,L,d,P,W,H,se,oe,wa,Ho,Go,za,Jo,Xo,xa,Ko,Yo,Da,Zo,ks,te,ue,Ca,Me,et,ya,at,ws,U,st,ca,ot,tt,Ta,rt,it,Pa,nt,lt,Fa,pt,mt,Ba,ut,dt,zs,Y,ct,fa,ft,ht,Aa,_t,gt,xs,Se,Ds,re,Oa,vt,$t,Va,qt,bt,Cs,de,Ne,Et,Ma,jt,kt,wt,Ie,zt,Sa,xt,Dt,ys,We,Ts,ce,Ct,Na,yt,Tt,Ps,Re,Fs,fe,Pt,Ia,Ft,Bt,Bs,Le,As,he,At,Wa,Ot,Vt,Os,Qe,Vs,_e,Ms,ie,ge,Ra,Ue,Mt,La,St,Ss,Q,Nt,ha,It,Wt,Qa,Rt,Lt,Ua,Qt,Ut,He,Ha,Ht,Gt,Ge,Ga,Jt,Xt,Je,Ja,Kt,Yt,Ns,ve,Is,ne,$e,Xa,Xe,Zt,Ka,er,Ws,qe,ar,Ya,sr,or,Rs,be,Ls,le,Ee,Za,Ke,tr,es,rr,Qs,je,ir,_a,nr,lr,Us,ke,ga,as,pr,mr,ur,Z,ss,dr,cr,Ye,fr,hr,os,_r,gr,Hs,va,vr,Gs,we,Js,ze,$r,ts,qr,br,Xs,Ze,Ks,xe,Er,rs,jr,kr,Ys,ea,Zs,De,wr,is,zr,xr,eo,aa,ao,Ce,so,pe,ye,ns,sa,Dr,ls,Cr,oo,J,yr,ps,Tr,Pr,ms,Fr,Br,us,Ar,Or,to,ee,Vr,ds,Mr,Sr,$a,Nr,Ir,ro,oa,io,Te,no,Pe,Wr,cs,Rr,Lr,lo,ta,po,Fe,Qr,fs,Ur,Hr,mo,ra,uo,me,Be,hs,ia,Gr,_s,Jr,co,Ae,Xr,gs,Kr,Yr,fo,qa,Zr,ho,na,_o,ba,ei,go,la,vo,Oe,ai,vs,si,oi,$o,pa,qo,Ea,ti,bo;return v=new da({}),Me=new da({}),Se=new I({props:{code:`from transformers import 
DistilBertConfig config = DistilBertConfig() print(config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = DistilBertConfig() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(config) DistilBertConfig { <span class="hljs-string">&quot;activation&quot;</span>: <span class="hljs-string">&quot;gelu&quot;</span>, <span class="hljs-string">&quot;attention_dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;dim&quot;</span>: <span class="hljs-number">768</span>, <span class="hljs-string">&quot;dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;hidden_dim&quot;</span>: <span class="hljs-number">3072</span>, <span class="hljs-string">&quot;initializer_range&quot;</span>: <span class="hljs-number">0.02</span>, <span class="hljs-string">&quot;max_position_embeddings&quot;</span>: <span class="hljs-number">512</span>, <span class="hljs-string">&quot;model_type&quot;</span>: <span class="hljs-string">&quot;distilbert&quot;</span>, <span class="hljs-string">&quot;n_heads&quot;</span>: <span class="hljs-number">12</span>, <span class="hljs-string">&quot;n_layers&quot;</span>: <span class="hljs-number">6</span>, <span class="hljs-string">&quot;pad_token_id&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;qa_dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;seq_classif_dropout&quot;</span>: <span class="hljs-number">0.2</span>, <span class="hljs-string">&quot;sinusoidal_pos_embds&quot;</span>: false, <span class="hljs-string">&quot;transformers_version&quot;</span>: <span class="hljs-string">&quot;4.16.2&quot;</span>, <span class="hljs-string">&quot;vocab_size&quot;</span>: <span class="hljs-number">30522</span> 
}`}}),We=new I({props:{code:`my_config = DistilBertConfig(activation="relu", attention_dropout=0.4) print(my_config)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig(activation=<span class="hljs-string">&quot;relu&quot;</span>, attention_dropout=<span class="hljs-number">0.4</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(my_config) DistilBertConfig { <span class="hljs-string">&quot;activation&quot;</span>: <span class="hljs-string">&quot;relu&quot;</span>, <span class="hljs-string">&quot;attention_dropout&quot;</span>: <span class="hljs-number">0.4</span>, <span class="hljs-string">&quot;dim&quot;</span>: <span class="hljs-number">768</span>, <span class="hljs-string">&quot;dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;hidden_dim&quot;</span>: <span class="hljs-number">3072</span>, <span class="hljs-string">&quot;initializer_range&quot;</span>: <span class="hljs-number">0.02</span>, <span class="hljs-string">&quot;max_position_embeddings&quot;</span>: <span class="hljs-number">512</span>, <span class="hljs-string">&quot;model_type&quot;</span>: <span class="hljs-string">&quot;distilbert&quot;</span>, <span class="hljs-string">&quot;n_heads&quot;</span>: <span class="hljs-number">12</span>, <span class="hljs-string">&quot;n_layers&quot;</span>: <span class="hljs-number">6</span>, <span class="hljs-string">&quot;pad_token_id&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;qa_dropout&quot;</span>: <span class="hljs-number">0.1</span>, <span class="hljs-string">&quot;seq_classif_dropout&quot;</span>: <span class="hljs-number">0.2</span>, <span class="hljs-string">&quot;sinusoidal_pos_embds&quot;</span>: false, <span class="hljs-string">&quot;transformers_version&quot;</span>: <span class="hljs-string">&quot;4.16.2&quot;</span>, <span class="hljs-string">&quot;vocab_size&quot;</span>: <span 
class="hljs-number">30522</span> }`}}),Re=new I({props:{code:'my_config = DistilBertConfig.from_pretrained("distilbert-base-uncased", activation="relu", attention_dropout=0.4)',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, activation=<span class="hljs-string">&quot;relu&quot;</span>, attention_dropout=<span class="hljs-number">0.4</span>)'}}),Le=new I({props:{code:'my_config.save_pretrained(save_directory="./your_model_save_path")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>my_config.save_pretrained(save_directory=<span class="hljs-string">&quot;./your_model_save_path&quot;</span>)'}}),Qe=new I({props:{code:'my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>my_config = DistilBertConfig.from_pretrained(<span class="hljs-string">&quot;./your_model_save_path/my_config.json&quot;</span>)'}}),_e=new Lo({props:{$$slots:{default:[yn]},$$scope:{ctx:S}}}),Ue=new da({}),ve=new kn({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Bn],pytorch:[Pn]},$$scope:{ctx:S}}}),Xe=new da({}),be=new kn({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Mn],pytorch:[On]},$$scope:{ctx:S}}}),Ke=new da({}),we=new Lo({props:{warning:!0,$$slots:{default:[Sn]},$$scope:{ctx:S}}}),Ze=new I({props:{code:`from transformers import DistilBertTokenizer my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>my_tokenizer = DistilBertTokenizer(vocab_file=<span class="hljs-string">&quot;my_vocab_file.txt&quot;</span>, do_lower_case=<span class="hljs-literal">False</span>, padding_side=<span 
class="hljs-string">&quot;left&quot;</span>)`}}),ea=new I({props:{code:`from transformers import DistilBertTokenizer slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>slow_tokenizer = DistilBertTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),aa=new I({props:{code:`from transformers import DistilBertTokenizerFast fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DistilBertTokenizerFast <span class="hljs-meta">&gt;&gt;&gt; </span>fast_tokenizer = DistilBertTokenizerFast.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),Ce=new Lo({props:{$$slots:{default:[Nn]},$$scope:{ctx:S}}}),sa=new da({}),oa=new I({props:{code:`from transformers import ViTFeatureExtractor vit_extractor = ViTFeatureExtractor() print(vit_extractor)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>vit_extractor = ViTFeatureExtractor() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(vit_extractor) ViTFeatureExtractor { <span class="hljs-string">&quot;do_normalize&quot;</span>: true, <span class="hljs-string">&quot;do_resize&quot;</span>: true, <span class="hljs-string">&quot;feature_extractor_type&quot;</span>: <span class="hljs-string">&quot;ViTFeatureExtractor&quot;</span>, <span class="hljs-string">&quot;image_mean&quot;</span>: [ <span class="hljs-number">0.5</span>, <span 
class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span> ], <span class="hljs-string">&quot;image_std&quot;</span>: [ <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span> ], <span class="hljs-string">&quot;resample&quot;</span>: <span class="hljs-number">2</span>, <span class="hljs-string">&quot;size&quot;</span>: <span class="hljs-number">224</span> }`}}),Te=new Lo({props:{$$slots:{default:[In]},$$scope:{ctx:S}}}),ta=new I({props:{code:`from transformers import ViTFeatureExtractor my_vit_extractor = ViTFeatureExtractor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3]) print(my_vit_extractor)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> ViTFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>my_vit_extractor = ViTFeatureExtractor(resample=<span class="hljs-string">&quot;PIL.Image.BOX&quot;</span>, do_normalize=<span class="hljs-literal">False</span>, image_mean=[<span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(my_vit_extractor) ViTFeatureExtractor { <span class="hljs-string">&quot;do_normalize&quot;</span>: false, <span class="hljs-string">&quot;do_resize&quot;</span>: true, <span class="hljs-string">&quot;feature_extractor_type&quot;</span>: <span class="hljs-string">&quot;ViTFeatureExtractor&quot;</span>, <span class="hljs-string">&quot;image_mean&quot;</span>: [ <span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span>, <span class="hljs-number">0.3</span> ], <span class="hljs-string">&quot;image_std&quot;</span>: [ <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span> ], <span class="hljs-string">&quot;resample&quot;</span>: <span 
class="hljs-string">&quot;PIL.Image.BOX&quot;</span>, <span class="hljs-string">&quot;size&quot;</span>: <span class="hljs-number">224</span> }`}}),ra=new I({props:{code:`from transformers import Wav2Vec2FeatureExtractor w2v2_extractor = Wav2Vec2FeatureExtractor() print(w2v2_extractor)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>w2v2_extractor = Wav2Vec2FeatureExtractor() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(w2v2_extractor) Wav2Vec2FeatureExtractor { <span class="hljs-string">&quot;do_normalize&quot;</span>: true, <span class="hljs-string">&quot;feature_extractor_type&quot;</span>: <span class="hljs-string">&quot;Wav2Vec2FeatureExtractor&quot;</span>, <span class="hljs-string">&quot;feature_size&quot;</span>: <span class="hljs-number">1</span>, <span class="hljs-string">&quot;padding_side&quot;</span>: <span class="hljs-string">&quot;right&quot;</span>, <span class="hljs-string">&quot;padding_value&quot;</span>: <span class="hljs-number">0.0</span>, <span class="hljs-string">&quot;return_attention_mask&quot;</span>: false, <span class="hljs-string">&quot;sampling_rate&quot;</span>: <span class="hljs-number">16000</span> }`}}),ia=new da({}),na=new I({props:{code:`from transformers import Wav2Vec2FeatureExtractor feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2FeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = Wav2Vec2FeatureExtractor(padding_value=<span class="hljs-number">1.0</span>, do_normalize=<span class="hljs-literal">True</span>)`}}),la=new I({props:{code:`from transformers import Wav2Vec2CTCTokenizer tokenizer = 
Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2CTCTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = Wav2Vec2CTCTokenizer(vocab_file=<span class="hljs-string">&quot;my_vocab_file.txt&quot;</span>)`}}),pa=new I({props:{code:`from transformers import Wav2Vec2Processor processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> Wav2Vec2Processor <span class="hljs-meta">&gt;&gt;&gt; </span>processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)`}}),{c(){m=r("meta"),g=c(),u=r("h1"),_=r("a"),q=r("span"),j(v.$$.fragment),$=c(),y=r("span"),E=o("Criar uma arquitetura customizada"),N=c(),C=r("p"),M=o("Uma "),B=r("a"),O=r("code"),T=o("AutoClass"),V=o(" automaticamente infere a arquitetura do modelo e baixa configura\xE7\xF5es e pesos pr\xE9-treinados. Geralmente, n\xF3s recomendamos usar uma "),h=r("code"),F=o("AutoClass"),R=o(" para produzir um c\xF3digo independente de checkpoints. Mas usu\xE1rios que querem mais contole sobre par\xE2metros espec\xEDficos do modelo pode criar um modelo customizado \u{1F917} Transformers a partir de algumas classes bases. Isso pode ser particulamente \xFAtil para algu\xE9m que est\xE1 interessado em estudar, treinar ou fazer experimentos com um modelo \u{1F917} Transformers. Nesse tutorial, ser\xE1 explicado como criar um modelo customizado sem uma "),A=r("code"),L=o("AutoClass"),d=o(". 
Aprenda como:"),P=c(),W=r("ul"),H=r("li"),se=o("Carregar e customizar a configura\xE7\xE3o de um modelo."),oe=c(),wa=r("li"),Ho=o("Criar a arquitetura de um modelo."),Go=c(),za=r("li"),Jo=o("Criar um tokenizer r\xE1pido e devagar para textos."),Xo=c(),xa=r("li"),Ko=o("Criar extrator de features para tarefas envolvendo audio e imagem."),Yo=c(),Da=r("li"),Zo=o("Criar um processador para tarefas multimodais."),ks=c(),te=r("h2"),ue=r("a"),Ca=r("span"),j(Me.$$.fragment),et=c(),ya=r("span"),at=o("configuration"),ws=c(),U=r("p"),st=o("A "),ca=r("a"),ot=o("configuration"),tt=o(" refere-se a atributos espec\xEDficos de um modelo. Cada configura\xE7\xE3o de modelo tem atributos diferentes; por exemplo, todos modelo de PLN possuem os atributos "),Ta=r("code"),rt=o("hidden_size"),it=o(", "),Pa=r("code"),nt=o("num_attention_heads"),lt=o(", "),Fa=r("code"),pt=o("num_hidden_layers"),mt=o(" e "),Ba=r("code"),ut=o("vocab_size"),dt=o(" em comum. Esse atributos especificam o numero de \u2018attention heads\u2019 ou \u2018hidden layers\u2019 para construir um modelo."),zs=c(),Y=r("p"),ct=o("D\xEA uma olhada a mais em "),fa=r("a"),ft=o("DistilBERT"),ht=o(" acessando "),Aa=r("code"),_t=o("DistilBertConfig"),gt=o(" para observar esses atributos:"),xs=c(),j(Se.$$.fragment),Ds=c(),re=r("p"),Oa=r("code"),vt=o("DistilBertConfig"),$t=o(" mostra todos os atributos padr\xF5es usados para construir um "),Va=r("code"),qt=o("DistilBertModel"),bt=o(" base. Todos atributos s\xE3o customiz\xE1veis, o que cria espa\xE7o para experimentos. 
Por exemplo, voc\xEA pode customizar um modelo padr\xE3o para:"),Cs=c(),de=r("ul"),Ne=r("li"),Et=o("Tentar uma fun\xE7\xE3o de ativa\xE7\xE3o diferente com o par\xE2metro "),Ma=r("code"),jt=o("activation"),kt=o("."),wt=c(),Ie=r("li"),zt=o("Usar uma taxa de desist\xEAncia maior para as probabilidades de \u2018attention\u2019 com o par\xE2metro "),Sa=r("code"),xt=o("attention_dropout"),Dt=o("."),ys=c(),j(We.$$.fragment),Ts=c(),ce=r("p"),Ct=o("Atributos de um modelo pr\xE9-treinado podem ser modificados na fun\xE7\xE3o "),Na=r("code"),yt=o("from_pretrained()"),Tt=o(":"),Ps=c(),j(Re.$$.fragment),Fs=c(),fe=r("p"),Pt=o("Uma vez que voc\xEA est\xE1 satisfeito com as configura\xE7\xF5es do seu modelo, voc\xEA consegue salvar elas com "),Ia=r("code"),Ft=o("save_pretrained()"),Bt=o(". Seu arquivo de configura\xE7\xF5es est\xE1 salvo como um arquivo JSON no diret\xF3rio especificado:"),Bs=c(),j(Le.$$.fragment),As=c(),he=r("p"),At=o("Para reusar o arquivo de configura\xE7\xF5es, carregue com "),Wa=r("code"),Ot=o("from_pretrained()"),Vt=o(":"),Os=c(),j(Qe.$$.fragment),Vs=c(),j(_e.$$.fragment),Ms=c(),ie=r("h2"),ge=r("a"),Ra=r("span"),j(Ue.$$.fragment),Mt=c(),La=r("span"),St=o("Modelo"),Ss=c(),Q=r("p"),Nt=o("O pr\xF3ximo passo \xE9 criar um "),ha=r("a"),It=o("model"),Wt=o(". O modelo - tamb\xE9m vagamente referido como arquitetura - define o que cada camada est\xE1 fazendo e quais opera\xE7\xF5es est\xE3o acontecendo. Atributos como "),Qa=r("code"),Rt=o("num_hidden_layers"),Lt=o(" das configura\xE7\xF5es s\xE3o utilizados para definir a arquitetura. Todo modelo compartilha a classe base "),Ua=r("code"),Qt=o("PreTrainedModel"),Ut=o(" e alguns m\xE9todos em comum como redimensionar o tamanho dos embeddings de entrada e podar as \u2018self-attention heads\u2019. 
Al\xE9m disso, todos os modelos tamb\xE9m s\xE3o subclasses de "),He=r("a"),Ha=r("code"),Ht=o("torch.nn.Module"),Gt=o(", "),Ge=r("a"),Ga=r("code"),Jt=o("tf.keras.Model"),Xt=o(" ou "),Je=r("a"),Ja=r("code"),Kt=o("flax.linen.Module"),Yt=o(". Isso significa que os modelos s\xE3o compat\xEDveis com cada respectivo uso de framework."),Ns=c(),j(ve.$$.fragment),Is=c(),ne=r("h3"),$e=r("a"),Xa=r("span"),j(Xe.$$.fragment),Zt=c(),Ka=r("span"),er=o("Heads do modelo"),Ws=c(),qe=r("p"),ar=o("Neste ponto, voc\xEA tem um modelo b\xE1sico do DistilBERT que gera os "),Ya=r("em"),sr=o("estados ocultos"),or=o(". Os estados ocultos s\xE3o passados como entrada para a head do moelo para produzir a sa\xEDda final. \u{1F917} Transformers fornece uma head de modelo diferente para cada tarefa desde que o modelo suporte essa tarefa (por exemplo, voc\xEA n\xE3o consegue utilizar o modelo DistilBERT para uma tarefa de \u2018sequence-to-sequence\u2019 como tradu\xE7\xE3o)."),Rs=c(),j(be.$$.fragment),Ls=c(),le=r("h2"),Ee=r("a"),Za=r("span"),j(Ke.$$.fragment),tr=c(),es=r("span"),rr=o("Tokenizer"),Qs=c(),je=r("p"),ir=o("A \xFAtlima classe base que voc\xEA precisa antes de usar um modelo para dados textuais \xE9 a "),_a=r("a"),nr=o("tokenizer"),lr=o(" para converter textos originais para tensores. Existem dois tipos de tokenizers que voc\xEA pode usar com \u{1F917} Transformers:"),Us=c(),ke=r("ul"),ga=r("li"),as=r("code"),pr=o("PreTrainedTokenizer"),mr=o(": uma implementa\xE7\xE3o em Python de um tokenizer."),ur=c(),Z=r("li"),ss=r("code"),dr=o("PreTrainedTokenizerFast"),cr=o(": um tokenizer da nossa biblioteca "),Ye=r("a"),fr=o("\u{1F917} Tokenizer"),hr=o(" baseada em Rust. Esse tipo de tokenizer \xE9 significantemente mais rapido - especialmente durante tokenization de codifica\xE7\xE3o - devido a implementa\xE7\xE3o em Rust. 
O tokenizer r\xE1pido tambem oferece m\xE9todos adicionais como "),os=r("em"),_r=o("offset mapping"),gr=o(" que mapeia tokens para suar palavras ou caracteres originais."),Hs=c(),va=r("p"),vr=o("Os dois tokenizers suporta m\xE9todos comuns como os de codificar e decodificar, adicionar novos tokens, e gerenciar tokens especiais."),Gs=c(),j(we.$$.fragment),Js=c(),ze=r("p"),$r=o("Se voc\xEA treinou seu pr\xF3rpio tokenizer, voc\xEA pode criar um a partir do seu arquivo "),ts=r("em"),qr=o("vocabulary"),br=o(":"),Xs=c(),j(Ze.$$.fragment),Ks=c(),xe=r("p"),Er=o("\xC9 importante lembrar que o vocabul\xE1rio de um tokenizer customizado ser\xE1 diferente de um vocabul\xE1rio gerado pelo tokenizer de um modelo pr\xE9 treinado. Voc\xEA precisa usar o vocabul\xE1rio de um modelo pr\xE9 treinado se voc\xEA estiver usando um modelo pr\xE9 treinado, caso contr\xE1rio as entradas n\xE3o far\xE3o sentido. Criando um tokenizer com um vocabul\xE1rio de um modelo pr\xE9 treinado com a classe "),rs=r("code"),jr=o("DistilBertTokenizer"),kr=o(":"),Ys=c(),j(ea.$$.fragment),Zs=c(),De=r("p"),wr=o("Criando um \u2018fast tokenizer\u2019 com a classe "),is=r("code"),zr=o("DistilBertTokenizerFast"),xr=o(":"),eo=c(),j(aa.$$.fragment),ao=c(),j(Ce.$$.fragment),so=c(),pe=r("h2"),ye=r("a"),ns=r("span"),j(sa.$$.fragment),Dr=c(),ls=r("span"),Cr=o("Extrator de features"),oo=c(),J=r("p"),yr=o("Um extrator de features processa entradas de imagem ou \xE1udio. Ele herda da classe base "),ps=r("code"),Tr=o("FeatureExtractionMixin"),Pr=o(", e pode tamb\xE9m herdar da classe "),ms=r("code"),Fr=o("ImageFeatureExtractionMixin"),Br=o(" para processamento de features de imagem ou da classe "),us=r("code"),Ar=o("SequenceFeatureExtractor"),Or=o(" para processamento de entradas de \xE1udio."),to=c(),ee=r("p"),Vr=o("Dependendo do que voc\xEA est\xE1 trabalhando em um audio ou uma tarefa de vis\xE3o, crie um estrator de features associado com o modelo que voc\xEA est\xE1 usando. 
Por exemplo, crie um "),ds=r("code"),Mr=o("ViTFeatureExtractor"),Sr=o(" padr\xE3o se voc\xEA estiver usando "),$a=r("a"),Nr=o("ViT"),Ir=o(" para classifica\xE7\xE3o de imagens:"),ro=c(),j(oa.$$.fragment),io=c(),j(Te.$$.fragment),no=c(),Pe=r("p"),Wr=o("Modifique qualquer par\xE2metro dentre os "),cs=r("code"),Rr=o("ViTFeatureExtractor"),Lr=o(" para criar seu extrator de features customizado."),lo=c(),j(ta.$$.fragment),po=c(),Fe=r("p"),Qr=o("Para entradas de \xE1utio, voc\xEA pode criar um "),fs=r("code"),Ur=o("Wav2Vec2FeatureExtractor"),Hr=o(" e customizar os par\xE2metros de uma forma similar:"),mo=c(),j(ra.$$.fragment),uo=c(),me=r("h2"),Be=r("a"),hs=r("span"),j(ia.$$.fragment),Gr=c(),_s=r("span"),Jr=o("Processor"),co=c(),Ae=r("p"),Xr=o("Para modelos que suportam tarefas multimodais, \u{1F917} Transformers oferece uma classe processadora que convenientemente cobre um extrator de features e tokenizer dentro de um \xFAnico objeto. Por exemplo, vamos usar o "),gs=r("code"),Kr=o("Wav2Vec2Processor"),Yr=o(" para uma tarefa de reconhecimento de fala autom\xE1tica (ASR). ASR transcreve \xE1udio para texto, ent\xE3o voc\xEA ir\xE1 precisar de um extrator de um features e um tokenizer."),fo=c(),qa=r("p"),Zr=o("Crie um extrator de features para lidar com as entradas de \xE1udio."),ho=c(),j(na.$$.fragment),_o=c(),ba=r("p"),ei=o("Crie um tokenizer para lidar com a entrada de textos:"),go=c(),j(la.$$.fragment),vo=c(),Oe=r("p"),ai=o("Combine o extrator de features e o tokenizer no "),vs=r("code"),si=o("Wav2Vec2Processor"),oi=o(":"),$o=c(),j(pa.$$.fragment),qo=c(),Ea=r("p"),ti=o("Com duas classes b\xE1sicas - configura\xE7\xE3o e modelo - e um preprocessamento de classe adicional (tokenizer, extrator de features, ou processador), voc\xEA pode criar qualquer modelo que suportado por \u{1F917} Transformers. Qualquer uma dessas classes base s\xE3o configur\xE1veis, te permitindo usar os atributos espec\xEDficos que voc\xEA queira. 
Voc\xEA pode facilmente preparar um modelo para treinamento ou modificar um modelo pr\xE9-treinado com poucas mudan\xE7as."),this.h()},l(e){const l=Dn('[data-svelte="svelte-1phssyn"]',document.head);m=i(l,"META",{name:!0,content:!0}),l.forEach(s),g=f(e),u=i(e,"H1",{class:!0});var ma=n(u);_=i(ma,"A",{id:!0,class:!0,href:!0});var $s=n(_);q=i($s,"SPAN",{});var qs=n(q);k(v.$$.fragment,qs),qs.forEach(s),$s.forEach(s),$=f(ma),y=i(ma,"SPAN",{});var bs=n(y);E=t(bs,"Criar uma arquitetura customizada"),bs.forEach(s),ma.forEach(s),N=f(e),C=i(e,"P",{});var K=n(C);M=t(K,"Uma "),B=i(K,"A",{href:!0});var Es=n(B);O=i(Es,"CODE",{});var ii=n(O);T=t(ii,"AutoClass"),ii.forEach(s),Es.forEach(s),V=t(K," automaticamente infere a arquitetura do modelo e baixa configura\xE7\xF5es e pesos pr\xE9-treinados. Geralmente, n\xF3s recomendamos usar uma "),h=i(K,"CODE",{});var ni=n(h);F=t(ni,"AutoClass"),ni.forEach(s),R=t(K," para produzir um c\xF3digo independente de checkpoints. Mas usu\xE1rios que querem mais contole sobre par\xE2metros espec\xEDficos do modelo pode criar um modelo customizado \u{1F917} Transformers a partir de algumas classes bases. Isso pode ser particulamente \xFAtil para algu\xE9m que est\xE1 interessado em estudar, treinar ou fazer experimentos com um modelo \u{1F917} Transformers. Nesse tutorial, ser\xE1 explicado como criar um modelo customizado sem uma "),A=i(K,"CODE",{});var li=n(A);L=t(li,"AutoClass"),li.forEach(s),d=t(K,". 
Aprenda como:"),K.forEach(s),P=f(e),W=i(e,"UL",{});var ae=n(W);H=i(ae,"LI",{});var pi=n(H);se=t(pi,"Carregar e customizar a configura\xE7\xE3o de um modelo."),pi.forEach(s),oe=f(ae),wa=i(ae,"LI",{});var mi=n(wa);Ho=t(mi,"Criar a arquitetura de um modelo."),mi.forEach(s),Go=f(ae),za=i(ae,"LI",{});var ui=n(za);Jo=t(ui,"Criar um tokenizer r\xE1pido e devagar para textos."),ui.forEach(s),Xo=f(ae),xa=i(ae,"LI",{});var di=n(xa);Ko=t(di,"Criar extrator de features para tarefas envolvendo audio e imagem."),di.forEach(s),Yo=f(ae),Da=i(ae,"LI",{});var ci=n(Da);Zo=t(ci,"Criar um processador para tarefas multimodais."),ci.forEach(s),ae.forEach(s),ks=f(e),te=i(e,"H2",{class:!0});var Eo=n(te);ue=i(Eo,"A",{id:!0,class:!0,href:!0});var fi=n(ue);Ca=i(fi,"SPAN",{});var hi=n(Ca);k(Me.$$.fragment,hi),hi.forEach(s),fi.forEach(s),et=f(Eo),ya=i(Eo,"SPAN",{});var _i=n(ya);at=t(_i,"configuration"),_i.forEach(s),Eo.forEach(s),ws=f(e),U=i(e,"P",{});var X=n(U);st=t(X,"A "),ca=i(X,"A",{href:!0});var gi=n(ca);ot=t(gi,"configuration"),gi.forEach(s),tt=t(X," refere-se a atributos espec\xEDficos de um modelo. Cada configura\xE7\xE3o de modelo tem atributos diferentes; por exemplo, todos modelo de PLN possuem os atributos "),Ta=i(X,"CODE",{});var vi=n(Ta);rt=t(vi,"hidden_size"),vi.forEach(s),it=t(X,", "),Pa=i(X,"CODE",{});var $i=n(Pa);nt=t($i,"num_attention_heads"),$i.forEach(s),lt=t(X,", "),Fa=i(X,"CODE",{});var qi=n(Fa);pt=t(qi,"num_hidden_layers"),qi.forEach(s),mt=t(X," e "),Ba=i(X,"CODE",{});var bi=n(Ba);ut=t(bi,"vocab_size"),bi.forEach(s),dt=t(X," em comum. 
Esse atributos especificam o numero de \u2018attention heads\u2019 ou \u2018hidden layers\u2019 para construir um modelo."),X.forEach(s),zs=f(e),Y=i(e,"P",{});var ja=n(Y);ct=t(ja,"D\xEA uma olhada a mais em "),fa=i(ja,"A",{href:!0});var Ei=n(fa);ft=t(Ei,"DistilBERT"),Ei.forEach(s),ht=t(ja," acessando "),Aa=i(ja,"CODE",{});var ji=n(Aa);_t=t(ji,"DistilBertConfig"),ji.forEach(s),gt=t(ja," para observar esses atributos:"),ja.forEach(s),xs=f(e),k(Se.$$.fragment,e),Ds=f(e),re=i(e,"P",{});var js=n(re);Oa=i(js,"CODE",{});var ki=n(Oa);vt=t(ki,"DistilBertConfig"),ki.forEach(s),$t=t(js," mostra todos os atributos padr\xF5es usados para construir um "),Va=i(js,"CODE",{});var wi=n(Va);qt=t(wi,"DistilBertModel"),wi.forEach(s),bt=t(js," base. Todos atributos s\xE3o customiz\xE1veis, o que cria espa\xE7o para experimentos. Por exemplo, voc\xEA pode customizar um modelo padr\xE3o para:"),js.forEach(s),Cs=f(e),de=i(e,"UL",{});var jo=n(de);Ne=i(jo,"LI",{});var ko=n(Ne);Et=t(ko,"Tentar uma fun\xE7\xE3o de ativa\xE7\xE3o diferente com o par\xE2metro "),Ma=i(ko,"CODE",{});var zi=n(Ma);jt=t(zi,"activation"),zi.forEach(s),kt=t(ko,"."),ko.forEach(s),wt=f(jo),Ie=i(jo,"LI",{});var wo=n(Ie);zt=t(wo,"Usar uma taxa de desist\xEAncia maior para as probabilidades de \u2018attention\u2019 com o par\xE2metro "),Sa=i(wo,"CODE",{});var xi=n(Sa);xt=t(xi,"attention_dropout"),xi.forEach(s),Dt=t(wo,"."),wo.forEach(s),jo.forEach(s),ys=f(e),k(We.$$.fragment,e),Ts=f(e),ce=i(e,"P",{});var zo=n(ce);Ct=t(zo,"Atributos de um modelo pr\xE9-treinado podem ser modificados na fun\xE7\xE3o "),Na=i(zo,"CODE",{});var Di=n(Na);yt=t(Di,"from_pretrained()"),Di.forEach(s),Tt=t(zo,":"),zo.forEach(s),Ps=f(e),k(Re.$$.fragment,e),Fs=f(e),fe=i(e,"P",{});var xo=n(fe);Pt=t(xo,"Uma vez que voc\xEA est\xE1 satisfeito com as configura\xE7\xF5es do seu modelo, voc\xEA consegue salvar elas com "),Ia=i(xo,"CODE",{});var Ci=n(Ia);Ft=t(Ci,"save_pretrained()"),Ci.forEach(s),Bt=t(xo,". 
Seu arquivo de configura\xE7\xF5es est\xE1 salvo como um arquivo JSON no diret\xF3rio especificado:"),xo.forEach(s),Bs=f(e),k(Le.$$.fragment,e),As=f(e),he=i(e,"P",{});var Do=n(he);At=t(Do,"Para reusar o arquivo de configura\xE7\xF5es, carregue com "),Wa=i(Do,"CODE",{});var yi=n(Wa);Ot=t(yi,"from_pretrained()"),yi.forEach(s),Vt=t(Do,":"),Do.forEach(s),Os=f(e),k(Qe.$$.fragment,e),Vs=f(e),k(_e.$$.fragment,e),Ms=f(e),ie=i(e,"H2",{class:!0});var Co=n(ie);ge=i(Co,"A",{id:!0,class:!0,href:!0});var Ti=n(ge);Ra=i(Ti,"SPAN",{});var Pi=n(Ra);k(Ue.$$.fragment,Pi),Pi.forEach(s),Ti.forEach(s),Mt=f(Co),La=i(Co,"SPAN",{});var Fi=n(La);St=t(Fi,"Modelo"),Fi.forEach(s),Co.forEach(s),Ss=f(e),Q=i(e,"P",{});var G=n(Q);Nt=t(G,"O pr\xF3ximo passo \xE9 criar um "),ha=i(G,"A",{href:!0});var Bi=n(ha);It=t(Bi,"model"),Bi.forEach(s),Wt=t(G,". O modelo - tamb\xE9m vagamente referido como arquitetura - define o que cada camada est\xE1 fazendo e quais opera\xE7\xF5es est\xE3o acontecendo. Atributos como "),Qa=i(G,"CODE",{});var Ai=n(Qa);Rt=t(Ai,"num_hidden_layers"),Ai.forEach(s),Lt=t(G," das configura\xE7\xF5es s\xE3o utilizados para definir a arquitetura. Todo modelo compartilha a classe base "),Ua=i(G,"CODE",{});var Oi=n(Ua);Qt=t(Oi,"PreTrainedModel"),Oi.forEach(s),Ut=t(G," e alguns m\xE9todos em comum como redimensionar o tamanho dos embeddings de entrada e podar as \u2018self-attention heads\u2019. Al\xE9m disso, todos os modelos tamb\xE9m s\xE3o subclasses de "),He=i(G,"A",{href:!0,rel:!0});var Vi=n(He);Ha=i(Vi,"CODE",{});var Mi=n(Ha);Ht=t(Mi,"torch.nn.Module"),Mi.forEach(s),Vi.forEach(s),Gt=t(G,", "),Ge=i(G,"A",{href:!0,rel:!0});var Si=n(Ge);Ga=i(Si,"CODE",{});var Ni=n(Ga);Jt=t(Ni,"tf.keras.Model"),Ni.forEach(s),Si.forEach(s),Xt=t(G," ou "),Je=i(G,"A",{href:!0,rel:!0});var Ii=n(Je);Ja=i(Ii,"CODE",{});var Wi=n(Ja);Kt=t(Wi,"flax.linen.Module"),Wi.forEach(s),Ii.forEach(s),Yt=t(G,". 
Isso significa que os modelos s\xE3o compat\xEDveis com cada respectivo uso de framework."),G.forEach(s),Ns=f(e),k(ve.$$.fragment,e),Is=f(e),ne=i(e,"H3",{class:!0});var yo=n(ne);$e=i(yo,"A",{id:!0,class:!0,href:!0});var Ri=n($e);Xa=i(Ri,"SPAN",{});var Li=n(Xa);k(Xe.$$.fragment,Li),Li.forEach(s),Ri.forEach(s),Zt=f(yo),Ka=i(yo,"SPAN",{});var Qi=n(Ka);er=t(Qi,"Heads do modelo"),Qi.forEach(s),yo.forEach(s),Ws=f(e),qe=i(e,"P",{});var To=n(qe);ar=t(To,"Neste ponto, voc\xEA tem um modelo b\xE1sico do DistilBERT que gera os "),Ya=i(To,"EM",{});var Ui=n(Ya);sr=t(Ui,"estados ocultos"),Ui.forEach(s),or=t(To,". Os estados ocultos s\xE3o passados como entrada para a head do moelo para produzir a sa\xEDda final. \u{1F917} Transformers fornece uma head de modelo diferente para cada tarefa desde que o modelo suporte essa tarefa (por exemplo, voc\xEA n\xE3o consegue utilizar o modelo DistilBERT para uma tarefa de \u2018sequence-to-sequence\u2019 como tradu\xE7\xE3o)."),To.forEach(s),Rs=f(e),k(be.$$.fragment,e),Ls=f(e),le=i(e,"H2",{class:!0});var Po=n(le);Ee=i(Po,"A",{id:!0,class:!0,href:!0});var Hi=n(Ee);Za=i(Hi,"SPAN",{});var Gi=n(Za);k(Ke.$$.fragment,Gi),Gi.forEach(s),Hi.forEach(s),tr=f(Po),es=i(Po,"SPAN",{});var Ji=n(es);rr=t(Ji,"Tokenizer"),Ji.forEach(s),Po.forEach(s),Qs=f(e),je=i(e,"P",{});var Fo=n(je);ir=t(Fo,"A \xFAtlima classe base que voc\xEA precisa antes de usar um modelo para dados textuais \xE9 a "),_a=i(Fo,"A",{href:!0});var Xi=n(_a);nr=t(Xi,"tokenizer"),Xi.forEach(s),lr=t(Fo," para converter textos originais para tensores. 
Existem dois tipos de tokenizers que voc\xEA pode usar com \u{1F917} Transformers:"),Fo.forEach(s),Us=f(e),ke=i(e,"UL",{});var Bo=n(ke);ga=i(Bo,"LI",{});var ri=n(ga);as=i(ri,"CODE",{});var Ki=n(as);pr=t(Ki,"PreTrainedTokenizer"),Ki.forEach(s),mr=t(ri,": uma implementa\xE7\xE3o em Python de um tokenizer."),ri.forEach(s),ur=f(Bo),Z=i(Bo,"LI",{});var ua=n(Z);ss=i(ua,"CODE",{});var Yi=n(ss);dr=t(Yi,"PreTrainedTokenizerFast"),Yi.forEach(s),cr=t(ua,": um tokenizer da nossa biblioteca "),Ye=i(ua,"A",{href:!0,rel:!0});var Zi=n(Ye);fr=t(Zi,"\u{1F917} Tokenizer"),Zi.forEach(s),hr=t(ua," baseada em Rust. Esse tipo de tokenizer \xE9 significantemente mais rapido - especialmente durante tokenization de codifica\xE7\xE3o - devido a implementa\xE7\xE3o em Rust. O tokenizer r\xE1pido tambem oferece m\xE9todos adicionais como "),os=i(ua,"EM",{});var en=n(os);_r=t(en,"offset mapping"),en.forEach(s),gr=t(ua," que mapeia tokens para suar palavras ou caracteres originais."),ua.forEach(s),Bo.forEach(s),Hs=f(e),va=i(e,"P",{});var an=n(va);vr=t(an,"Os dois tokenizers suporta m\xE9todos comuns como os de codificar e decodificar, adicionar novos tokens, e gerenciar tokens especiais."),an.forEach(s),Gs=f(e),k(we.$$.fragment,e),Js=f(e),ze=i(e,"P",{});var Ao=n(ze);$r=t(Ao,"Se voc\xEA treinou seu pr\xF3rpio tokenizer, voc\xEA pode criar um a partir do seu arquivo "),ts=i(Ao,"EM",{});var sn=n(ts);qr=t(sn,"vocabulary"),sn.forEach(s),br=t(Ao,":"),Ao.forEach(s),Xs=f(e),k(Ze.$$.fragment,e),Ks=f(e),xe=i(e,"P",{});var Oo=n(xe);Er=t(Oo,"\xC9 importante lembrar que o vocabul\xE1rio de um tokenizer customizado ser\xE1 diferente de um vocabul\xE1rio gerado pelo tokenizer de um modelo pr\xE9 treinado. Voc\xEA precisa usar o vocabul\xE1rio de um modelo pr\xE9 treinado se voc\xEA estiver usando um modelo pr\xE9 treinado, caso contr\xE1rio as entradas n\xE3o far\xE3o sentido. 
Criando um tokenizer com um vocabul\xE1rio de um modelo pr\xE9 treinado com a classe "),rs=i(Oo,"CODE",{});var on=n(rs);jr=t(on,"DistilBertTokenizer"),on.forEach(s),kr=t(Oo,":"),Oo.forEach(s),Ys=f(e),k(ea.$$.fragment,e),Zs=f(e),De=i(e,"P",{});var Vo=n(De);wr=t(Vo,"Criando um \u2018fast tokenizer\u2019 com a classe "),is=i(Vo,"CODE",{});var tn=n(is);zr=t(tn,"DistilBertTokenizerFast"),tn.forEach(s),xr=t(Vo,":"),Vo.forEach(s),eo=f(e),k(aa.$$.fragment,e),ao=f(e),k(Ce.$$.fragment,e),so=f(e),pe=i(e,"H2",{class:!0});var Mo=n(pe);ye=i(Mo,"A",{id:!0,class:!0,href:!0});var rn=n(ye);ns=i(rn,"SPAN",{});var nn=n(ns);k(sa.$$.fragment,nn),nn.forEach(s),rn.forEach(s),Dr=f(Mo),ls=i(Mo,"SPAN",{});var ln=n(ls);Cr=t(ln,"Extrator de features"),ln.forEach(s),Mo.forEach(s),oo=f(e),J=i(e,"P",{});var Ve=n(J);yr=t(Ve,"Um extrator de features processa entradas de imagem ou \xE1udio. Ele herda da classe base "),ps=i(Ve,"CODE",{});var pn=n(ps);Tr=t(pn,"FeatureExtractionMixin"),pn.forEach(s),Pr=t(Ve,", e pode tamb\xE9m herdar da classe "),ms=i(Ve,"CODE",{});var mn=n(ms);Fr=t(mn,"ImageFeatureExtractionMixin"),mn.forEach(s),Br=t(Ve," para processamento de features de imagem ou da classe "),us=i(Ve,"CODE",{});var un=n(us);Ar=t(un,"SequenceFeatureExtractor"),un.forEach(s),Or=t(Ve," para processamento de entradas de \xE1udio."),Ve.forEach(s),to=f(e),ee=i(e,"P",{});var ka=n(ee);Vr=t(ka,"Dependendo do que voc\xEA est\xE1 trabalhando em um audio ou uma tarefa de vis\xE3o, crie um estrator de features associado com o modelo que voc\xEA est\xE1 usando. 
Por exemplo, crie um "),ds=i(ka,"CODE",{});var dn=n(ds);Mr=t(dn,"ViTFeatureExtractor"),dn.forEach(s),Sr=t(ka," padr\xE3o se voc\xEA estiver usando "),$a=i(ka,"A",{href:!0});var cn=n($a);Nr=t(cn,"ViT"),cn.forEach(s),Ir=t(ka," para classifica\xE7\xE3o de imagens:"),ka.forEach(s),ro=f(e),k(oa.$$.fragment,e),io=f(e),k(Te.$$.fragment,e),no=f(e),Pe=i(e,"P",{});var So=n(Pe);Wr=t(So,"Modifique qualquer par\xE2metro dentre os "),cs=i(So,"CODE",{});var fn=n(cs);Rr=t(fn,"ViTFeatureExtractor"),fn.forEach(s),Lr=t(So," para criar seu extrator de features customizado."),So.forEach(s),lo=f(e),k(ta.$$.fragment,e),po=f(e),Fe=i(e,"P",{});var No=n(Fe);Qr=t(No,"Para entradas de \xE1utio, voc\xEA pode criar um "),fs=i(No,"CODE",{});var hn=n(fs);Ur=t(hn,"Wav2Vec2FeatureExtractor"),hn.forEach(s),Hr=t(No," e customizar os par\xE2metros de uma forma similar:"),No.forEach(s),mo=f(e),k(ra.$$.fragment,e),uo=f(e),me=i(e,"H2",{class:!0});var Io=n(me);Be=i(Io,"A",{id:!0,class:!0,href:!0});var _n=n(Be);hs=i(_n,"SPAN",{});var gn=n(hs);k(ia.$$.fragment,gn),gn.forEach(s),_n.forEach(s),Gr=f(Io),_s=i(Io,"SPAN",{});var vn=n(_s);Jr=t(vn,"Processor"),vn.forEach(s),Io.forEach(s),co=f(e),Ae=i(e,"P",{});var Wo=n(Ae);Xr=t(Wo,"Para modelos que suportam tarefas multimodais, \u{1F917} Transformers oferece uma classe processadora que convenientemente cobre um extrator de features e tokenizer dentro de um \xFAnico objeto. Por exemplo, vamos usar o "),gs=i(Wo,"CODE",{});var $n=n(gs);Kr=t($n,"Wav2Vec2Processor"),$n.forEach(s),Yr=t(Wo," para uma tarefa de reconhecimento de fala autom\xE1tica (ASR). 
ASR transcreve \xE1udio para texto, ent\xE3o voc\xEA ir\xE1 precisar de um extrator de um features e um tokenizer."),Wo.forEach(s),fo=f(e),qa=i(e,"P",{});var qn=n(qa);Zr=t(qn,"Crie um extrator de features para lidar com as entradas de \xE1udio."),qn.forEach(s),ho=f(e),k(na.$$.fragment,e),_o=f(e),ba=i(e,"P",{});var bn=n(ba);ei=t(bn,"Crie um tokenizer para lidar com a entrada de textos:"),bn.forEach(s),go=f(e),k(la.$$.fragment,e),vo=f(e),Oe=i(e,"P",{});var Ro=n(Oe);ai=t(Ro,"Combine o extrator de features e o tokenizer no "),vs=i(Ro,"CODE",{});var En=n(vs);si=t(En,"Wav2Vec2Processor"),En.forEach(s),oi=t(Ro,":"),Ro.forEach(s),$o=f(e),k(pa.$$.fragment,e),qo=f(e),Ea=i(e,"P",{});var jn=n(Ea);ti=t(jn,"Com duas classes b\xE1sicas - configura\xE7\xE3o e modelo - e um preprocessamento de classe adicional (tokenizer, extrator de features, ou processador), voc\xEA pode criar qualquer modelo que suportado por \u{1F917} Transformers. Qualquer uma dessas classes base s\xE3o configur\xE1veis, te permitindo usar os atributos espec\xEDficos que voc\xEA queira. 
Voc\xEA pode facilmente preparar um modelo para treinamento ou modificar um modelo pr\xE9-treinado com poucas mudan\xE7as."),jn.forEach(s),this.h()},h(){b(m,"name","hf:doc:metadata"),b(m,"content",JSON.stringify(Rn)),b(_,"id","criar-uma-arquitetura-customizada"),b(_,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(_,"href","#criar-uma-arquitetura-customizada"),b(u,"class","relative group"),b(B,"href","model_doc/auto"),b(ue,"id","configuration"),b(ue,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(ue,"href","#configuration"),b(te,"class","relative group"),b(ca,"href","main_classes/configuration"),b(fa,"href","model_doc/distilbert"),b(ge,"id","modelo"),b(ge,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(ge,"href","#modelo"),b(ie,"class","relative group"),b(ha,"href","main_classes/models"),b(He,"href","https://pytorch.org/docs/stable/generated/torch.nn.Module.html"),b(He,"rel","nofollow"),b(Ge,"href","https://www.tensorflow.org/api_docs/python/tf/keras/Model"),b(Ge,"rel","nofollow"),b(Je,"href","https://flax.readthedocs.io/en/latest/flax.linen.html#module"),b(Je,"rel","nofollow"),b($e,"id","heads-do-modelo"),b($e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b($e,"href","#heads-do-modelo"),b(ne,"class","relative group"),b(Ee,"id","tokenizer"),b(Ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full"),b(Ee,"href","#tokenizer"),b(le,"class","relative group"),b(_a,"href","main_classes/tokenizer"),b(Ye,"href","https://huggingface.co/docs/tokenizers/python/latest/"),b(Ye,"rel","nofollow"),b(ye,"id","extrator-de-features"),b(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(ye,"href","#extrator-de-features"),b(pe,"class","relative group"),b($a,"href","model_doc/vit"),b(Be,"id","processor"),b(Be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(Be,"href","#processor"),b(me,"class","relative group")},m(e,l){a(document.head,m),p(e,g,l),p(e,u,l),a(u,_),a(_,q),w(v,q,null),a(u,$),a(u,y),a(y,E),p(e,N,l),p(e,C,l),a(C,M),a(C,B),a(B,O),a(O,T),a(C,V),a(C,h),a(h,F),a(C,R),a(C,A),a(A,L),a(C,d),p(e,P,l),p(e,W,l),a(W,H),a(H,se),a(W,oe),a(W,wa),a(wa,Ho),a(W,Go),a(W,za),a(za,Jo),a(W,Xo),a(W,xa),a(xa,Ko),a(W,Yo),a(W,Da),a(Da,Zo),p(e,ks,l),p(e,te,l),a(te,ue),a(ue,Ca),w(Me,Ca,null),a(te,et),a(te,ya),a(ya,at),p(e,ws,l),p(e,U,l),a(U,st),a(U,ca),a(ca,ot),a(U,tt),a(U,Ta),a(Ta,rt),a(U,it),a(U,Pa),a(Pa,nt),a(U,lt),a(U,Fa),a(Fa,pt),a(U,mt),a(U,Ba),a(Ba,ut),a(U,dt),p(e,zs,l),p(e,Y,l),a(Y,ct),a(Y,fa),a(fa,ft),a(Y,ht),a(Y,Aa),a(Aa,_t),a(Y,gt),p(e,xs,l),w(Se,e,l),p(e,Ds,l),p(e,re,l),a(re,Oa),a(Oa,vt),a(re,$t),a(re,Va),a(Va,qt),a(re,bt),p(e,Cs,l),p(e,de,l),a(de,Ne),a(Ne,Et),a(Ne,Ma),a(Ma,jt),a(Ne,kt),a(de,wt),a(de,Ie),a(Ie,zt),a(Ie,Sa),a(Sa,xt),a(Ie,Dt),p(e,ys,l),w(We,e,l),p(e,Ts,l),p(e,ce,l),a(ce,Ct),a(ce,Na),a(Na,yt),a(ce,Tt),p(e,Ps,l),w(Re,e,l),p(e,Fs,l),p(e,fe,l),a(fe,Pt),a(fe,Ia),a(Ia,Ft),a(fe,Bt),p(e,Bs,l),w(Le,e,l),p(e,As,l),p(e,he,l),a(he,At),a(he,Wa),a(Wa,Ot),a(he,Vt),p(e,Os,l),w(Qe,e,l),p(e,Vs,l),w(_e,e,l),p(e,Ms,l),p(e,ie,l),a(ie,ge),a(ge,Ra),w(Ue,Ra,null),a(ie,Mt),a(ie,La),a(La,St),p(e,Ss,l),p(e,Q,l),a(Q,Nt),a(Q
,ha),a(ha,It),a(Q,Wt),a(Q,Qa),a(Qa,Rt),a(Q,Lt),a(Q,Ua),a(Ua,Qt),a(Q,Ut),a(Q,He),a(He,Ha),a(Ha,Ht),a(Q,Gt),a(Q,Ge),a(Ge,Ga),a(Ga,Jt),a(Q,Xt),a(Q,Je),a(Je,Ja),a(Ja,Kt),a(Q,Yt),p(e,Ns,l),w(ve,e,l),p(e,Is,l),p(e,ne,l),a(ne,$e),a($e,Xa),w(Xe,Xa,null),a(ne,Zt),a(ne,Ka),a(Ka,er),p(e,Ws,l),p(e,qe,l),a(qe,ar),a(qe,Ya),a(Ya,sr),a(qe,or),p(e,Rs,l),w(be,e,l),p(e,Ls,l),p(e,le,l),a(le,Ee),a(Ee,Za),w(Ke,Za,null),a(le,tr),a(le,es),a(es,rr),p(e,Qs,l),p(e,je,l),a(je,ir),a(je,_a),a(_a,nr),a(je,lr),p(e,Us,l),p(e,ke,l),a(ke,ga),a(ga,as),a(as,pr),a(ga,mr),a(ke,ur),a(ke,Z),a(Z,ss),a(ss,dr),a(Z,cr),a(Z,Ye),a(Ye,fr),a(Z,hr),a(Z,os),a(os,_r),a(Z,gr),p(e,Hs,l),p(e,va,l),a(va,vr),p(e,Gs,l),w(we,e,l),p(e,Js,l),p(e,ze,l),a(ze,$r),a(ze,ts),a(ts,qr),a(ze,br),p(e,Xs,l),w(Ze,e,l),p(e,Ks,l),p(e,xe,l),a(xe,Er),a(xe,rs),a(rs,jr),a(xe,kr),p(e,Ys,l),w(ea,e,l),p(e,Zs,l),p(e,De,l),a(De,wr),a(De,is),a(is,zr),a(De,xr),p(e,eo,l),w(aa,e,l),p(e,ao,l),w(Ce,e,l),p(e,so,l),p(e,pe,l),a(pe,ye),a(ye,ns),w(sa,ns,null),a(pe,Dr),a(pe,ls),a(ls,Cr),p(e,oo,l),p(e,J,l),a(J,yr),a(J,ps),a(ps,Tr),a(J,Pr),a(J,ms),a(ms,Fr),a(J,Br),a(J,us),a(us,Ar),a(J,Or),p(e,to,l),p(e,ee,l),a(ee,Vr),a(ee,ds),a(ds,Mr),a(ee,Sr),a(ee,$a),a($a,Nr),a(ee,Ir),p(e,ro,l),w(oa,e,l),p(e,io,l),w(Te,e,l),p(e,no,l),p(e,Pe,l),a(Pe,Wr),a(Pe,cs),a(cs,Rr),a(Pe,Lr),p(e,lo,l),w(ta,e,l),p(e,po,l),p(e,Fe,l),a(Fe,Qr),a(Fe,fs),a(fs,Ur),a(Fe,Hr),p(e,mo,l),w(ra,e,l),p(e,uo,l),p(e,me,l),a(me,Be),a(Be,hs),w(ia,hs,null),a(me,Gr),a(me,_s),a(_s,Jr),p(e,co,l),p(e,Ae,l),a(Ae,Xr),a(Ae,gs),a(gs,Kr),a(Ae,Yr),p(e,fo,l),p(e,qa,l),a(qa,Zr),p(e,ho,l),w(na,e,l),p(e,_o,l),p(e,ba,l),a(ba,ei),p(e,go,l),w(la,e,l),p(e,vo,l),p(e,Oe,l),a(Oe,ai),a(Oe,vs),a(vs,si),a(Oe,oi),p(e,$o,l),w(pa,e,l),p(e,qo,l),p(e,Ea,l),a(Ea,ti),bo=!0},p(e,[l]){const ma={};l&2&&(ma.$$scope={dirty:l,ctx:e}),_e.$set(ma);const $s={};l&2&&($s.$$scope={dirty:l,ctx:e}),ve.$set($s);const qs={};l&2&&(qs.$$scope={dirty:l,ctx:e}),be.$set(qs);const bs={};l&2&&(bs.$$scope={dirty:l,ctx:e}),we.$set(bs);const 
K={};l&2&&(K.$$scope={dirty:l,ctx:e}),Ce.$set(K);const Es={};l&2&&(Es.$$scope={dirty:l,ctx:e}),Te.$set(Es)},i(e){bo||(z(v.$$.fragment,e),z(Me.$$.fragment,e),z(Se.$$.fragment,e),z(We.$$.fragment,e),z(Re.$$.fragment,e),z(Le.$$.fragment,e),z(Qe.$$.fragment,e),z(_e.$$.fragment,e),z(Ue.$$.fragment,e),z(ve.$$.fragment,e),z(Xe.$$.fragment,e),z(be.$$.fragment,e),z(Ke.$$.fragment,e),z(we.$$.fragment,e),z(Ze.$$.fragment,e),z(ea.$$.fragment,e),z(aa.$$.fragment,e),z(Ce.$$.fragment,e),z(sa.$$.fragment,e),z(oa.$$.fragment,e),z(Te.$$.fragment,e),z(ta.$$.fragment,e),z(ra.$$.fragment,e),z(ia.$$.fragment,e),z(na.$$.fragment,e),z(la.$$.fragment,e),z(pa.$$.fragment,e),bo=!0)},o(e){x(v.$$.fragment,e),x(Me.$$.fragment,e),x(Se.$$.fragment,e),x(We.$$.fragment,e),x(Re.$$.fragment,e),x(Le.$$.fragment,e),x(Qe.$$.fragment,e),x(_e.$$.fragment,e),x(Ue.$$.fragment,e),x(ve.$$.fragment,e),x(Xe.$$.fragment,e),x(be.$$.fragment,e),x(Ke.$$.fragment,e),x(we.$$.fragment,e),x(Ze.$$.fragment,e),x(ea.$$.fragment,e),x(aa.$$.fragment,e),x(Ce.$$.fragment,e),x(sa.$$.fragment,e),x(oa.$$.fragment,e),x(Te.$$.fragment,e),x(ta.$$.fragment,e),x(ra.$$.fragment,e),x(ia.$$.fragment,e),x(na.$$.fragment,e),x(la.$$.fragment,e),x(pa.$$.fragment,e),bo=!1},d(e){s(m),e&&s(g),e&&s(u),D(v),e&&s(N),e&&s(C),e&&s(P),e&&s(W),e&&s(ks),e&&s(te),D(Me),e&&s(ws),e&&s(U),e&&s(zs),e&&s(Y),e&&s(xs),D(Se,e),e&&s(Ds),e&&s(re),e&&s(Cs),e&&s(de),e&&s(ys),D(We,e),e&&s(Ts),e&&s(ce),e&&s(Ps),D(Re,e),e&&s(Fs),e&&s(fe),e&&s(Bs),D(Le,e),e&&s(As),e&&s(he),e&&s(Os),D(Qe,e),e&&s(Vs),D(_e,e),e&&s(Ms),e&&s(ie),D(Ue),e&&s(Ss),e&&s(Q),e&&s(Ns),D(ve,e),e&&s(Is),e&&s(ne),D(Xe),e&&s(Ws),e&&s(qe),e&&s(Rs),D(be,e),e&&s(Ls),e&&s(le),D(Ke),e&&s(Qs),e&&s(je),e&&s(Us),e&&s(ke),e&&s(Hs),e&&s(va),e&&s(Gs),D(we,e),e&&s(Js),e&&s(ze),e&&s(Xs),D(Ze,e),e&&s(Ks),e&&s(xe),e&&s(Ys),D(ea,e),e&&s(Zs),e&&s(De),e&&s(eo),D(aa,e),e&&s(ao),D(Ce,e),e&&s(so),e&&s(pe),D(sa),e&&s(oo),e&&s(J),e&&s(to),e&&s(ee),e&&s(ro),D(oa,e),e&&s(io),D(Te,e),e&&s(no),e&&s(Pe),e&&s(lo),D(ta,e),e&&s(po),
e&&s(Fe),e&&s(mo),D(ra,e),e&&s(uo),e&&s(me),D(ia),e&&s(co),e&&s(Ae),e&&s(fo),e&&s(qa),e&&s(ho),D(na,e),e&&s(_o),e&&s(ba),e&&s(go),D(la,e),e&&s(vo),e&&s(Oe),e&&s($o),D(pa,e),e&&s(qo),e&&s(Ea)}}}const Rn={local:"criar-uma-arquitetura-customizada",sections:[{local:"configuration",title:"configuration"},{local:"modelo",sections:[{local:"heads-do-modelo",title:"Heads do modelo"}],title:"Modelo"},{local:"tokenizer",title:"Tokenizer"},{local:"extrator-de-features",title:"Extrator de features"},{local:"processor",title:"Processor"}],title:"Criar uma arquitetura customizada"};function Ln(S){return Cn(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Xn extends wn{constructor(m){super();zn(this,m,Ln,Wn,xn,{})}}export{Xn as default,Rn as metadata};
484
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/quicktour.mdx-hf-doc-builder.js
import{S as yp,i as Cp,s as zp,e as n,k as d,w as E,t as r,M as Pp,c as i,d as t,m as $,a as p,x as k,h as l,b,G as a,g as c,y as w,q as j,o as A,B as q,v as Op,L as ye}from"../chunks/vendor-hf-doc-builder.js";import{T as vt}from"../chunks/Tip-hf-doc-builder.js";import{Y as xp}from"../chunks/Youtube-hf-doc-builder.js";import{I as fa}from"../chunks/IconCopyLink-hf-doc-builder.js";import{C as L}from"../chunks/CodeBlock-hf-doc-builder.js";import{D as Mp}from"../chunks/DocNotebookDropdown-hf-doc-builder.js";import{F as gt,M as _e}from"../chunks/Markdown-hf-doc-builder.js";function Dp(P){let s,u;return{c(){s=n("p"),u=r("Todos os exemplos de c\xF3digo apresentados na documenta\xE7\xE3o t\xEAm um bot\xE3o no canto superior direito para escolher se voc\xEA deseja ocultar ou mostrar o c\xF3digo no Pytorch ou no TensorFlow. Caso contr\xE1rio, \xE9 esperado que funcione para ambos back-ends sem nenhuma altera\xE7\xE3o.")},l(o){s=i(o,"P",{});var f=p(s);u=l(f,"Todos os exemplos de c\xF3digo apresentados na documenta\xE7\xE3o t\xEAm um bot\xE3o no canto superior direito para escolher se voc\xEA deseja ocultar ou mostrar o c\xF3digo no Pytorch ou no TensorFlow. 
Caso contr\xE1rio, \xE9 esperado que funcione para ambos back-ends sem nenhuma altera\xE7\xE3o."),f.forEach(t)},m(o,f){c(o,s,f),a(s,u)},d(o){o&&t(s)}}}function Sp(P){let s,u,o,f,h,g,C,M;return{c(){s=n("p"),u=r("Para mais detalhes sobre a "),o=n("code"),f=r("pipeline()"),h=r(" e tarefas associadas, siga a documenta\xE7\xE3o "),g=n("a"),C=r("aqui"),M=r("."),this.h()},l(x){s=i(x,"P",{});var O=p(s);u=l(O,"Para mais detalhes sobre a "),o=i(O,"CODE",{});var S=p(o);f=l(S,"pipeline()"),S.forEach(t),h=l(O," e tarefas associadas, siga a documenta\xE7\xE3o "),g=i(O,"A",{href:!0});var I=p(g);C=l(I,"aqui"),I.forEach(t),M=l(O,"."),O.forEach(t),this.h()},h(){b(g,"href","./main_classes/pipelines")},m(x,O){c(x,s,O),a(s,u),a(s,o),a(o,f),a(s,h),a(s,g),a(g,C),a(s,M)},d(x){x&&t(s)}}}function Ip(P){let s,u;return s=new L({props:{code:"pip install torch",highlighted:"pip install torch"}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p:ye,i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function Fp(P){let s,u;return s=new _e({props:{$$slots:{default:[Ip]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function Np(P){let s,u;return s=new L({props:{code:"pip install tensorflow",highlighted:"pip install tensorflow"}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p:ye,i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function Up(P){let s,u;return s=new _e({props:{$$slots:{default:[Np]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function Hp(P){let s,u,o,f,h,g,C,M,x,O,S,I,F,U;return F=new L({props:{code:`from transformers 
import AutoTokenizer, AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_name)`}}),{c(){s=n("p"),u=r("Use o "),o=n("code"),f=r("AutoModelForSequenceClassification"),h=r(" e "),g=n("code"),C=r("AutoTokenizer"),M=r(" para carregar o modelo pr\xE9-treinado e seu tokenizer associado (mais em "),x=n("code"),O=r("AutoClass"),S=r(" abaixo):"),I=d(),E(F.$$.fragment)},l(z){s=i(z,"P",{});var _=p(s);u=l(_,"Use o "),o=i(_,"CODE",{});var y=p(o);f=l(y,"AutoModelForSequenceClassification"),y.forEach(t),h=l(_," e "),g=i(_,"CODE",{});var D=p(g);C=l(D,"AutoTokenizer"),D.forEach(t),M=l(_," para carregar o modelo pr\xE9-treinado e seu tokenizer associado (mais em "),x=i(_,"CODE",{});var W=p(x);O=l(W,"AutoClass"),W.forEach(t),S=l(_," abaixo):"),_.forEach(t),I=$(z),k(F.$$.fragment,z)},m(z,_){c(z,s,_),a(s,u),a(s,o),a(o,f),a(s,h),a(s,g),a(g,C),a(s,M),a(s,x),a(x,O),a(s,S),c(z,I,_),w(F,z,_),U=!0},p:ye,i(z){U||(j(F.$$.fragment,z),U=!0)},o(z){A(F.$$.fragment,z),U=!1},d(z){z&&t(s),z&&t(I),q(F,z)}}}function Lp(P){let s,u;return s=new _e({props:{$$slots:{default:[Hp]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function Rp(P){let s,u,o,f,h,g,C,M,x,O,S,I,F,U;return F=new L({props:{code:`from transformers import AutoTokenizer, TFAutoModelForSequenceClassification model = 
TFAutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_name)`}}),{c(){s=n("p"),u=r("Use o "),o=n("code"),f=r("TFAutoModelForSequenceClassification"),h=r(" and "),g=n("code"),C=r("AutoTokenizer"),M=r(" para carregar o modelo pr\xE9-treinado e o tokenizer associado (mais em "),x=n("code"),O=r("TFAutoClass"),S=r(" abaixo):"),I=d(),E(F.$$.fragment)},l(z){s=i(z,"P",{});var _=p(s);u=l(_,"Use o "),o=i(_,"CODE",{});var y=p(o);f=l(y,"TFAutoModelForSequenceClassification"),y.forEach(t),h=l(_," and "),g=i(_,"CODE",{});var D=p(g);C=l(D,"AutoTokenizer"),D.forEach(t),M=l(_," para carregar o modelo pr\xE9-treinado e o tokenizer associado (mais em "),x=i(_,"CODE",{});var W=p(x);O=l(W,"TFAutoClass"),W.forEach(t),S=l(_," abaixo):"),_.forEach(t),I=$(z),k(F.$$.fragment,z)},m(z,_){c(z,s,_),a(s,u),a(s,o),a(o,f),a(s,h),a(s,g),a(g,C),a(s,M),a(s,x),a(x,O),a(s,S),c(z,I,_),w(F,z,_),U=!0},p:ye,i(z){U||(j(F.$$.fragment,z),U=!0)},o(z){A(F.$$.fragment,z),U=!1},d(z){z&&t(s),z&&t(I),q(F,z)}}}function Wp(P){let s,u;return s=new _e({props:{$$slots:{default:[Rp]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function Vp(P){let s,u;return s=new L({props:{code:`pt_batch = tokenizer( ["We are very happy to show you the \u{1F917} transformers library.", "We hope you don't hate it."], padding=True, truncation=True, max_length=512, return_tensors="pt", 
)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pt_batch = tokenizer( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;We are very happy to show you the \u{1F917} transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>], <span class="hljs-meta">... </span> padding=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> max_length=<span class="hljs-number">512</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span>)`}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p:ye,i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function Gp(P){let s,u;return s=new _e({props:{$$slots:{default:[Vp]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function Bp(P){let s,u;return s=new L({props:{code:`tf_batch = tokenizer( ["We are very happy to show you the \u{1F917} Transformers library.", "We hope you don't hate it."], padding=True, truncation=True, max_length=512, return_tensors="tf", )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_batch = tokenizer( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;We are very happy to show you the \u{1F917} Transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>], <span class="hljs-meta">... </span> padding=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> max_length=<span class="hljs-number">512</span>, <span class="hljs-meta">... 
</span> return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, <span class="hljs-meta">... </span>)`}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p:ye,i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function Yp(P){let s,u;return s=new _e({props:{$$slots:{default:[Bp]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function Jp(P){let s,u,o,f,h,g,C,M;return{c(){s=n("p"),u=r("Veja o "),o=n("a"),f=r("sum\xE1rio de tarefas"),h=r(" para qual classe de "),g=n("code"),C=r("AutoModel"),M=r(" usar para cada tarefa."),this.h()},l(x){s=i(x,"P",{});var O=p(s);u=l(O,"Veja o "),o=i(O,"A",{href:!0});var S=p(o);f=l(S,"sum\xE1rio de tarefas"),S.forEach(t),h=l(O," para qual classe de "),g=i(O,"CODE",{});var I=p(g);C=l(I,"AutoModel"),I.forEach(t),M=l(O," usar para cada tarefa."),O.forEach(t),this.h()},h(){b(o,"href","./task_summary")},m(x,O){c(x,s,O),a(s,u),a(s,o),a(o,f),a(s,h),a(s,g),a(g,C),a(s,M)},d(x){x&&t(s)}}}function Qp(P){let s,u,o,f,h,g,C,M,x,O,S,I,F,U,z,_,y,D,W,R,oe,Y,ee,J,V,X,Q,K,ue,re,de,le,ae,ne,$e,T,N,ie;return _=new L({props:{code:`from transformers import AutoModelForSequenceClassification model_name = "nlptown/bert-base-multilingual-uncased-sentiment" pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)`}}),D=new 
vt({props:{$$slots:{default:[Jp]},$$scope:{ctx:P}}}),X=new L({props:{code:"pt_outputs = pt_model(**pt_batch)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>pt_outputs = pt_model(**pt_batch)'}}),N=new L({props:{code:`from torch import nn pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) print(pt_predictions)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torch <span class="hljs-keyword">import</span> nn <span class="hljs-meta">&gt;&gt;&gt; </span>pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(pt_predictions) tensor([[<span class="hljs-number">0.0021</span>, <span class="hljs-number">0.0018</span>, <span class="hljs-number">0.0115</span>, <span class="hljs-number">0.2121</span>, <span class="hljs-number">0.7725</span>], [<span class="hljs-number">0.2084</span>, <span class="hljs-number">0.1826</span>, <span class="hljs-number">0.1969</span>, <span class="hljs-number">0.1755</span>, <span class="hljs-number">0.2365</span>]], grad_fn=&lt;SoftmaxBackward0&gt;)`}}),{c(){s=n("p"),u=r("\u{1F917} Transformers fornecem uma maneira simples e unificada de carregar inst\xE2ncias pr\xE9-treinadas. Isso significa que voc\xEA pode carregar um "),o=n("code"),f=r("AutoModel"),h=r(" como carregaria um "),g=n("code"),C=r("AutoTokenizer"),M=r(". A \xFAnica diferen\xE7a \xE9 selecionar o "),x=n("code"),O=r("AutoModel"),S=r(" correto para a tarefa. Como voc\xEA est\xE1 fazendo classifica\xE7\xE3o de texto ou sequ\xEAncia, carregue "),I=n("code"),F=r("AutoModelForSequenceClassification"),U=r(":"),z=d(),E(_.$$.fragment),y=d(),E(D.$$.fragment),W=d(),R=n("p"),oe=r("Agora voc\xEA pode passar seu grupo de entradas pr\xE9-processadas diretamente para o modelo. 
Voc\xEA apenas tem que descompactar o dicion\xE1rio usando "),Y=n("code"),ee=r("**"),J=r(":"),V=d(),E(X.$$.fragment),Q=d(),K=n("p"),ue=r("O modelo gera as ativa\xE7\xF5es finais no atributo "),re=n("code"),de=r("logits"),le=r(". Aplique a fun\xE7\xE3o softmax aos "),ae=n("code"),ne=r("logits"),$e=r(" para recuperar as probabilidades:"),T=d(),E(N.$$.fragment)},l(v){s=i(v,"P",{});var H=p(s);u=l(H,"\u{1F917} Transformers fornecem uma maneira simples e unificada de carregar inst\xE2ncias pr\xE9-treinadas. Isso significa que voc\xEA pode carregar um "),o=i(H,"CODE",{});var me=p(o);f=l(me,"AutoModel"),me.forEach(t),h=l(H," como carregaria um "),g=i(H,"CODE",{});var Ce=p(g);C=l(Ce,"AutoTokenizer"),Ce.forEach(t),M=l(H,". A \xFAnica diferen\xE7a \xE9 selecionar o "),x=i(H,"CODE",{});var fe=p(x);O=l(fe,"AutoModel"),fe.forEach(t),S=l(H," correto para a tarefa. Como voc\xEA est\xE1 fazendo classifica\xE7\xE3o de texto ou sequ\xEAncia, carregue "),I=i(H,"CODE",{});var ge=p(I);F=l(ge,"AutoModelForSequenceClassification"),ge.forEach(t),U=l(H,":"),H.forEach(t),z=$(v),k(_.$$.fragment,v),y=$(v),k(D.$$.fragment,v),W=$(v),R=i(v,"P",{});var pe=p(R);oe=l(pe,"Agora voc\xEA pode passar seu grupo de entradas pr\xE9-processadas diretamente para o modelo. Voc\xEA apenas tem que descompactar o dicion\xE1rio usando "),Y=i(pe,"CODE",{});var Fe=p(Y);ee=l(Fe,"**"),Fe.forEach(t),J=l(pe,":"),pe.forEach(t),V=$(v),k(X.$$.fragment,v),Q=$(v),K=i(v,"P",{});var ve=p(K);ue=l(ve,"O modelo gera as ativa\xE7\xF5es finais no atributo "),re=i(ve,"CODE",{});var Ba=p(re);de=l(Ba,"logits"),Ba.forEach(t),le=l(ve,". 
Aplique a fun\xE7\xE3o softmax aos "),ae=i(ve,"CODE",{});var da=p(ae);ne=l(da,"logits"),da.forEach(t),$e=l(ve," para recuperar as probabilidades:"),ve.forEach(t),T=$(v),k(N.$$.fragment,v)},m(v,H){c(v,s,H),a(s,u),a(s,o),a(o,f),a(s,h),a(s,g),a(g,C),a(s,M),a(s,x),a(x,O),a(s,S),a(s,I),a(I,F),a(s,U),c(v,z,H),w(_,v,H),c(v,y,H),w(D,v,H),c(v,W,H),c(v,R,H),a(R,oe),a(R,Y),a(Y,ee),a(R,J),c(v,V,H),w(X,v,H),c(v,Q,H),c(v,K,H),a(K,ue),a(K,re),a(re,de),a(K,le),a(K,ae),a(ae,ne),a(K,$e),c(v,T,H),w(N,v,H),ie=!0},p(v,H){const me={};H&2&&(me.$$scope={dirty:H,ctx:v}),D.$set(me)},i(v){ie||(j(_.$$.fragment,v),j(D.$$.fragment,v),j(X.$$.fragment,v),j(N.$$.fragment,v),ie=!0)},o(v){A(_.$$.fragment,v),A(D.$$.fragment,v),A(X.$$.fragment,v),A(N.$$.fragment,v),ie=!1},d(v){v&&t(s),v&&t(z),q(_,v),v&&t(y),q(D,v),v&&t(W),v&&t(R),v&&t(V),q(X,v),v&&t(Q),v&&t(K),v&&t(T),q(N,v)}}}function Kp(P){let s,u;return s=new _e({props:{$$slots:{default:[Qp]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function Zp(P){let s,u,o,f,h,g,C,M;return{c(){s=n("p"),u=r("Veja o "),o=n("a"),f=r("sum\xE1rio de tarefas"),h=r(" para qual classe de "),g=n("code"),C=r("AutoModel"),M=r(" usar para cada tarefa."),this.h()},l(x){s=i(x,"P",{});var O=p(s);u=l(O,"Veja o "),o=i(O,"A",{href:!0});var S=p(o);f=l(S,"sum\xE1rio de tarefas"),S.forEach(t),h=l(O," para qual classe de "),g=i(O,"CODE",{});var I=p(g);C=l(I,"AutoModel"),I.forEach(t),M=l(O," usar para cada tarefa."),O.forEach(t),this.h()},h(){b(o,"href","./task_summary")},m(x,O){c(x,s,O),a(s,u),a(s,o),a(o,f),a(s,h),a(s,g),a(g,C),a(s,M)},d(x){x&&t(s)}}}function Xp(P){let s,u,o,f,h,g,C,M,x,O,S,I,F,U,z,_,y,D,W,R,oe,Y,ee,J,V,X,Q,K,ue,re,de,le,ae,ne,$e;return _=new L({props:{code:`from transformers import TFAutoModelForSequenceClassification model_name = "nlptown/bert-base-multilingual-uncased-sentiment" 
tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)`}}),D=new vt({props:{$$slots:{default:[Zp]},$$scope:{ctx:P}}}),ee=new L({props:{code:"tf_outputs = tf_model(tf_batch)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tf_outputs = tf_model(tf_batch)'}}),ne=new L({props:{code:`import tensorflow as tf tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) tf_predictions`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_predictions`}}),{c(){s=n("p"),u=r("\u{1F917} Transformers fornecem uma maneira simples e unificada de carregar inst\xE2ncias pr\xE9-treinadas. Isso significa que voc\xEA pode carregar um "),o=n("code"),f=r("TFAutoModel"),h=r(" como carregaria um "),g=n("code"),C=r("AutoTokenizer"),M=r(". A \xFAnica diferen\xE7a \xE9 selecionar o "),x=n("code"),O=r("TFAutoModel"),S=r(" correto para a tarefa. 
Como voc\xEA est\xE1 fazendo classifica\xE7\xE3o de texto ou sequ\xEAncia, carregue "),I=n("code"),F=r("TFAutoModelForSequenceClassification"),U=r(":"),z=d(),E(_.$$.fragment),y=d(),E(D.$$.fragment),W=d(),R=n("p"),oe=r("Agora voc\xEA pode passar seu grupo de entradas pr\xE9-processadas diretamente para o modelo atrav\xE9s da passagem de chaves de dicion\xE1rios ao tensor."),Y=d(),E(ee.$$.fragment),J=d(),V=n("p"),X=r("O modelo gera as ativa\xE7\xF5es finais no atributo "),Q=n("code"),K=r("logits"),ue=r(". Aplique a fun\xE7\xE3o softmax aos "),re=n("code"),de=r("logits"),le=r(" para recuperar as probabilidades:"),ae=d(),E(ne.$$.fragment)},l(T){s=i(T,"P",{});var N=p(s);u=l(N,"\u{1F917} Transformers fornecem uma maneira simples e unificada de carregar inst\xE2ncias pr\xE9-treinadas. Isso significa que voc\xEA pode carregar um "),o=i(N,"CODE",{});var ie=p(o);f=l(ie,"TFAutoModel"),ie.forEach(t),h=l(N," como carregaria um "),g=i(N,"CODE",{});var v=p(g);C=l(v,"AutoTokenizer"),v.forEach(t),M=l(N,". A \xFAnica diferen\xE7a \xE9 selecionar o "),x=i(N,"CODE",{});var H=p(x);O=l(H,"TFAutoModel"),H.forEach(t),S=l(N," correto para a tarefa. Como voc\xEA est\xE1 fazendo classifica\xE7\xE3o de texto ou sequ\xEAncia, carregue "),I=i(N,"CODE",{});var me=p(I);F=l(me,"TFAutoModelForSequenceClassification"),me.forEach(t),U=l(N,":"),N.forEach(t),z=$(T),k(_.$$.fragment,T),y=$(T),k(D.$$.fragment,T),W=$(T),R=i(T,"P",{});var Ce=p(R);oe=l(Ce,"Agora voc\xEA pode passar seu grupo de entradas pr\xE9-processadas diretamente para o modelo atrav\xE9s da passagem de chaves de dicion\xE1rios ao tensor."),Ce.forEach(t),Y=$(T),k(ee.$$.fragment,T),J=$(T),V=i(T,"P",{});var fe=p(V);X=l(fe,"O modelo gera as ativa\xE7\xF5es finais no atributo "),Q=i(fe,"CODE",{});var ge=p(Q);K=l(ge,"logits"),ge.forEach(t),ue=l(fe,". 
Aplique a fun\xE7\xE3o softmax aos "),re=i(fe,"CODE",{});var pe=p(re);de=l(pe,"logits"),pe.forEach(t),le=l(fe," para recuperar as probabilidades:"),fe.forEach(t),ae=$(T),k(ne.$$.fragment,T)},m(T,N){c(T,s,N),a(s,u),a(s,o),a(o,f),a(s,h),a(s,g),a(g,C),a(s,M),a(s,x),a(x,O),a(s,S),a(s,I),a(I,F),a(s,U),c(T,z,N),w(_,T,N),c(T,y,N),w(D,T,N),c(T,W,N),c(T,R,N),a(R,oe),c(T,Y,N),w(ee,T,N),c(T,J,N),c(T,V,N),a(V,X),a(V,Q),a(Q,K),a(V,ue),a(V,re),a(re,de),a(V,le),c(T,ae,N),w(ne,T,N),$e=!0},p(T,N){const ie={};N&2&&(ie.$$scope={dirty:N,ctx:T}),D.$set(ie)},i(T){$e||(j(_.$$.fragment,T),j(D.$$.fragment,T),j(ee.$$.fragment,T),j(ne.$$.fragment,T),$e=!0)},o(T){A(_.$$.fragment,T),A(D.$$.fragment,T),A(ee.$$.fragment,T),A(ne.$$.fragment,T),$e=!1},d(T){T&&t(s),T&&t(z),q(_,T),T&&t(y),q(D,T),T&&t(W),T&&t(R),T&&t(Y),q(ee,T),T&&t(J),T&&t(V),T&&t(ae),q(ne,T)}}}function em(P){let s,u;return s=new _e({props:{$$slots:{default:[Xp]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function am(P){let s,u,o,f,h;return{c(){s=n("p"),u=r("Todos os modelos de \u{1F917} Transformers (PyTorch ou TensorFlow) geram tensores "),o=n("em"),f=r("antes"),h=r(" da fun\xE7\xE3o de ativa\xE7\xE3o final (como softmax) pois essa fun\xE7\xE3o algumas vezes \xE9 fundida com a perda.")},l(g){s=i(g,"P",{});var C=p(s);u=l(C,"Todos os modelos de \u{1F917} Transformers (PyTorch ou TensorFlow) geram tensores "),o=i(C,"EM",{});var M=p(o);f=l(M,"antes"),M.forEach(t),h=l(C," da fun\xE7\xE3o de ativa\xE7\xE3o final (como softmax) pois essa fun\xE7\xE3o algumas vezes \xE9 fundida com a perda."),C.forEach(t)},m(g,C){c(g,s,C),a(s,u),a(s,o),a(o,f),a(s,h)},d(g){g&&t(s)}}}function tm(P){let s,u,o,f,h;return{c(){s=n("p"),u=r(`As sa\xEDdas do modelo \u{1F917} Transformers s\xE3o classes de dados especiais para que seus atributos sejam preenchidos automaticamente 
em um IDE. As sa\xEDdas do modelo tamb\xE9m se comportam como uma tupla ou um dicion\xE1rio (por exemplo, voc\xEA pode indexar com um inteiro, uma parte ou uma string), caso em que os atributos `),o=n("code"),f=r("None"),h=r(" s\xE3o ignorados.")},l(g){s=i(g,"P",{});var C=p(s);u=l(C,`As sa\xEDdas do modelo \u{1F917} Transformers s\xE3o classes de dados especiais para que seus atributos sejam preenchidos automaticamente em um IDE. As sa\xEDdas do modelo tamb\xE9m se comportam como uma tupla ou um dicion\xE1rio (por exemplo, voc\xEA pode indexar com um inteiro, uma parte ou uma string), caso em que os atributos `),o=i(C,"CODE",{});var M=p(o);f=l(M,"None"),M.forEach(t),h=l(C," s\xE3o ignorados."),C.forEach(t)},m(g,C){c(g,s,C),a(s,u),a(s,o),a(o,f),a(s,h)},d(g){g&&t(s)}}}function sm(P){let s,u,o,f,h,g,C,M,x,O,S,I,F,U,z,_;return C=new L({props:{code:`pt_save_directory = "./pt_save_pretrained" tokenizer.save_pretrained(pt_save_directory) pt_model.save_pretrained(pt_save_directory)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>pt_save_directory = <span class="hljs-string">&quot;./pt_save_pretrained&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(pt_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.save_pretrained(pt_save_directory)`}}),z=new L({props:{code:'pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;./pt_save_pretrained&quot;</span>)'}}),{c(){s=n("p"),u=r("Uma vez que seu modelo estiver afinado, voc\xEA pode salv\xE1-lo com seu Tokenizer usando "),o=n("code"),f=r("PreTrainedModel.save_pretrained()"),h=r(":"),g=d(),E(C.$$.fragment),M=d(),x=n("p"),O=r("Quando voc\xEA estiver pronto para us\xE1-lo novamente, recarregue com 
"),S=n("code"),I=r("PreTrainedModel.from_pretrained()"),F=r(":"),U=d(),E(z.$$.fragment)},l(y){s=i(y,"P",{});var D=p(s);u=l(D,"Uma vez que seu modelo estiver afinado, voc\xEA pode salv\xE1-lo com seu Tokenizer usando "),o=i(D,"CODE",{});var W=p(o);f=l(W,"PreTrainedModel.save_pretrained()"),W.forEach(t),h=l(D,":"),D.forEach(t),g=$(y),k(C.$$.fragment,y),M=$(y),x=i(y,"P",{});var R=p(x);O=l(R,"Quando voc\xEA estiver pronto para us\xE1-lo novamente, recarregue com "),S=i(R,"CODE",{});var oe=p(S);I=l(oe,"PreTrainedModel.from_pretrained()"),oe.forEach(t),F=l(R,":"),R.forEach(t),U=$(y),k(z.$$.fragment,y)},m(y,D){c(y,s,D),a(s,u),a(s,o),a(o,f),a(s,h),c(y,g,D),w(C,y,D),c(y,M,D),c(y,x,D),a(x,O),a(x,S),a(S,I),a(x,F),c(y,U,D),w(z,y,D),_=!0},p:ye,i(y){_||(j(C.$$.fragment,y),j(z.$$.fragment,y),_=!0)},o(y){A(C.$$.fragment,y),A(z.$$.fragment,y),_=!1},d(y){y&&t(s),y&&t(g),q(C,y),y&&t(M),y&&t(x),y&&t(U),q(z,y)}}}function om(P){let s,u;return s=new _e({props:{$$slots:{default:[sm]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function rm(P){let s,u,o,f,h,g,C,M,x,O,S,I,F,U,z;return C=new L({props:{code:`tf_save_directory = "./tf_save_pretrained" tokenizer.save_pretrained(tf_save_directory) tf_model.save_pretrained(tf_save_directory)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_save_directory = <span class="hljs-string">&quot;./tf_save_pretrained&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(tf_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model.save_pretrained(tf_save_directory)`}}),U=new L({props:{code:'tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(<span 
class="hljs-string">&quot;./tf_save_pretrained&quot;</span>)'}}),{c(){s=n("p"),u=r("Uma vez que seu modelo estiver afinado, voc\xEA pode salv\xE1-lo com seu Tokenizer usando "),o=n("code"),f=r("TFPreTrainedModel.save_pretrained()"),h=r(":"),g=d(),E(C.$$.fragment),M=d(),x=n("p"),O=r("Quando voc\xEA estiver pronto para us\xE1-lo novamente, recarregue com "),S=n("code"),I=r("TFPreTrainedModel.from_pretrained()"),F=d(),E(U.$$.fragment)},l(_){s=i(_,"P",{});var y=p(s);u=l(y,"Uma vez que seu modelo estiver afinado, voc\xEA pode salv\xE1-lo com seu Tokenizer usando "),o=i(y,"CODE",{});var D=p(o);f=l(D,"TFPreTrainedModel.save_pretrained()"),D.forEach(t),h=l(y,":"),y.forEach(t),g=$(_),k(C.$$.fragment,_),M=$(_),x=i(_,"P",{});var W=p(x);O=l(W,"Quando voc\xEA estiver pronto para us\xE1-lo novamente, recarregue com "),S=i(W,"CODE",{});var R=p(S);I=l(R,"TFPreTrainedModel.from_pretrained()"),R.forEach(t),W.forEach(t),F=$(_),k(U.$$.fragment,_)},m(_,y){c(_,s,y),a(s,u),a(s,o),a(o,f),a(s,h),c(_,g,y),w(C,_,y),c(_,M,y),c(_,x,y),a(x,O),a(x,S),a(S,I),c(_,F,y),w(U,_,y),z=!0},p:ye,i(_){z||(j(C.$$.fragment,_),j(U.$$.fragment,_),z=!0)},o(_){A(C.$$.fragment,_),A(U.$$.fragment,_),z=!1},d(_){_&&t(s),_&&t(g),q(C,_),_&&t(M),_&&t(x),_&&t(F),q(U,_)}}}function lm(P){let s,u;return s=new _e({props:{$$slots:{default:[rm]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function nm(P){let s,u;return s=new L({props:{code:`from transformers import AutoModel tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = 
AutoTokenizer.from_pretrained(tf_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=<span class="hljs-literal">True</span>)`}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p:ye,i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function im(P){let s,u;return s=new _e({props:{$$slots:{default:[nm]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function pm(P){let s,u;return s=new L({props:{code:`from transformers import TFAutoModel tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=<span class="hljs-literal">True</span>)`}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p:ye,i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function mm(P){let s,u;return s=new _e({props:{$$slots:{default:[pm]},$$scope:{ctx:P}}}),{c(){E(s.$$.fragment)},l(o){k(s.$$.fragment,o)},m(o,f){w(s,o,f),u=!0},p(o,f){const h={};f&2&&(h.$$scope={dirty:f,ctx:o}),s.$set(h)},i(o){u||(j(s.$$.fragment,o),u=!0)},o(o){A(s.$$.fragment,o),u=!1},d(o){q(s,o)}}}function cm(P){let 
s,u,o,f,h,g,C,M,x,O,S,I,F,U,z,_,y,D,W,R,oe,Y,ee,J,V,X,Q,K,ue,re,de,le,ae,ne,$e,T,N,ie,v,H,me,Ce,fe,ge,pe,Fe,ve,Ba,da,G,bt,Zo,Xo,Et,er,ar,kt,tr,sr,wt,or,rr,jt,lr,nr,At,ir,pr,qt,mr,cr,Tt,ur,Ts,$a,xt,fr,dr,xs,be,yt,$r,hr,Ct,_r,gr,zt,vr,ys,ha,Pt,br,Er,Cs,Ne,Ot,kr,wr,Mt,jr,zs,Ue,Ps,ze,He,Dt,_a,Ar,St,qr,Os,Le,Tr,It,xr,yr,Ms,Ya,Cr,Ds,Re,Ss,We,zr,Ft,Pr,Or,Is,ga,Fs,Ee,Mr,va,Dr,Sr,Nt,Ir,Fr,Ns,ba,Us,Ve,Nr,Ut,Ur,Hr,Hs,Ea,Ls,ke,Lr,Ht,Rr,Wr,ka,Vr,Gr,Rs,wa,Ws,Ge,Br,Lt,Yr,Jr,Vs,ja,Gs,we,Qr,Aa,Kr,Zr,qa,Xr,el,Bs,Ta,Ys,Ja,al,Js,xa,Qs,Be,tl,Rt,sl,ol,Ks,ya,Zs,Ye,rl,Qa,ll,nl,Xs,Pe,Je,Wt,Ca,il,Vt,pl,eo,he,ml,Gt,cl,ul,za,fl,dl,Pa,$l,hl,ao,Oa,to,Qe,so,je,_l,Bt,gl,vl,Yt,bl,El,oo,Ma,ro,Ae,kl,Ka,wl,jl,Za,Al,ql,lo,Oe,Ke,Jt,Da,Tl,Qt,xl,no,Sa,io,Z,yl,Kt,Cl,zl,Zt,Pl,Ol,Xt,Ml,Dl,Xa,Sl,Il,es,Fl,Nl,as,Ul,Hl,po,qe,Ll,ts,Rl,Wl,ss,Vl,Gl,mo,Me,Ze,os,Ia,Bl,rs,Yl,co,Te,Jl,ls,Ql,Kl,et,Zl,Xl,uo,Xe,en,ns,an,tn,fo,Fa,$o,ea,sn,is,on,rn,ho,at,ln,_o,Na,go,tt,nn,vo,aa,st,ot,pn,mn,cn,rt,lt,un,fn,bo,ta,dn,ps,$n,hn,Eo,sa,ko,oa,_n,nt,gn,vn,wo,De,ra,ms,Ua,bn,cs,En,jo,la,Ao,na,qo,B,kn,Ha,us,wn,jn,fs,An,qn,La,Tn,xn,ds,yn,Cn,$s,zn,Pn,Ra,On,Mn,it,Dn,Sn,To,ia,xo,Se,pa,hs,Wa,In,_s,Fn,yo,ma,Co,xe,Nn,gs,Un,Hn,vs,Ln,Rn,zo,ca,Po;return g=new fa({}),S=new Mp({props:{classNames:"absolute z-10 right-0 
top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/pt/quicktour.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/pt/pytorch/quicktour.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/pt/tensorflow/quicktour.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/pt/quicktour.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/pt/pytorch/quicktour.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/pt/tensorflow/quicktour.ipynb"}]}}),Y=new vt({props:{$$slots:{default:[Dp]},$$scope:{ctx:P}}}),Q=new fa({}),N=new xp({props:{id:"tiZFewofSLM"}}),Ue=new vt({props:{$$slots:{default:[Sp]},$$scope:{ctx:P}}}),_a=new fa({}),Re=new gt({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Up],pytorch:[Fp]},$$scope:{ctx:P}}}),ga=new L({props:{code:`from transformers import pipeline classifier = pipeline("sentiment-analysis")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>classifier = pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>)`}}),ba=new L({props:{code:'classifier("We are very happy to show you the \u{1F917} Transformers library.")',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>classifier(<span class="hljs-string">&quot;We are very happy to show you the \u{1F917} Transformers library.&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span 
class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998</span>}]`}}),Ea=new L({props:{code:`results = classifier(["We are very happy to show you the \u{1F917} Transformers library.", "We hope you don't hate it."]) for result in results: print(f"label: {result['label']}, with score: {round(result['score'], 4)}")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>results = classifier([<span class="hljs-string">&quot;We are very happy to show you the \u{1F917} Transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> result <span class="hljs-keyword">in</span> results: <span class="hljs-meta">... </span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;label: <span class="hljs-subst">{result[<span class="hljs-string">&#x27;label&#x27;</span>]}</span>, with score: <span class="hljs-subst">{<span class="hljs-built_in">round</span>(result[<span class="hljs-string">&#x27;score&#x27;</span>], <span class="hljs-number">4</span>)}</span>&quot;</span>) label: POSITIVE, <span class="hljs-keyword">with</span> score: <span class="hljs-number">0.9998</span> label: NEGATIVE, <span class="hljs-keyword">with</span> score: <span class="hljs-number">0.5309</span>`}}),wa=new L({props:{code:"pip install datasets ",highlighted:"pip install datasets "}}),ja=new L({props:{code:`import torch from transformers import pipeline speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>speech_recognizer = pipeline(<span 
class="hljs-string">&quot;automatic-speech-recognition&quot;</span>, model=<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>)`}}),Ta=new L({props:{code:`from datasets import load_dataset, Audio dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset, Audio <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;PolyAI/minds14&quot;</span>, name=<span class="hljs-string">&quot;en-US&quot;</span>, split=<span class="hljs-string">&quot;train&quot;</span>)`}}),xa=new L({props:{code:'dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.cast_column(<span class="hljs-string">&quot;audio&quot;</span>, Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))'}}),ya=new L({props:{code:`result = speech_recognizer(dataset[:4]["audio"]) print([d["text"] for d in result])`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>result = speech_recognizer(dataset[:<span class="hljs-number">4</span>][<span class="hljs-string">&quot;audio&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>([d[<span class="hljs-string">&quot;text&quot;</span>] <span class="hljs-keyword">for</span> d <span class="hljs-keyword">in</span> result]) [<span class="hljs-string">&#x27;I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT&#x27;</span>, <span class="hljs-string">&quot;FONDERING HOW I&#x27;D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE&quot;</span>, <span class="hljs-string">&quot;I I&#x27;D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I&#x27;M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME 
HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I&#x27;M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS&quot;</span>, <span class="hljs-string">&#x27;HOW DO I TURN A JOIN A COUNT&#x27;</span>]`}}),Ca=new fa({}),Oa=new L({props:{code:'model_name = "nlptown/bert-base-multilingual-uncased-sentiment"',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span>'}}),Qe=new gt({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Wp],pytorch:[Lp]},$$scope:{ctx:P}}}),Ma=new L({props:{code:`classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) classifier("Nous sommes tr\xE8s heureux de vous pr\xE9senter la biblioth\xE8que \u{1F917} Transformers.")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>classifier = pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>, model=model, tokenizer=tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>classifier(<span class="hljs-string">&quot;Nous sommes tr\xE8s heureux de vous pr\xE9senter la biblioth\xE8que \u{1F917} Transformers.&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;5 stars&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.7273</span>}]`}}),Da=new fa({}),Sa=new xp({props:{id:"AhChOFRegn4"}}),Ia=new fa({}),Fa=new L({props:{code:`from transformers import AutoTokenizer model_name = "nlptown/bert-base-multilingual-uncased-sentiment" tokenizer = AutoTokenizer.from_pretrained(model_name)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span 
class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_name)`}}),Na=new L({props:{code:`encoding = tokenizer("We are very happy to show you the \u{1F917} Transformers library.") print(encoding)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&quot;We are very happy to show you the \u{1F917} Transformers library.&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoding) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: [<span class="hljs-number">101</span>, <span class="hljs-number">11312</span>, <span class="hljs-number">10320</span>, <span class="hljs-number">12495</span>, <span class="hljs-number">19308</span>, <span class="hljs-number">10114</span>, <span class="hljs-number">11391</span>, <span class="hljs-number">10855</span>, <span class="hljs-number">10103</span>, <span class="hljs-number">100</span>, <span class="hljs-number">58263</span>, <span class="hljs-number">13299</span>, <span class="hljs-number">119</span>, <span class="hljs-number">102</span>], <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;attention_mask&#x27;</span>: [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span 
class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]}`}}),sa=new gt({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Yp],pytorch:[Gp]},$$scope:{ctx:P}}}),Ua=new fa({}),la=new gt({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[em],pytorch:[Kp]},$$scope:{ctx:P}}}),na=new vt({props:{$$slots:{default:[am]},$$scope:{ctx:P}}}),ia=new vt({props:{$$slots:{default:[tm]},$$scope:{ctx:P}}}),Wa=new fa({}),ma=new gt({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[lm],pytorch:[om]},$$scope:{ctx:P}}}),ca=new gt({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[mm],pytorch:[im]},$$scope:{ctx:P}}}),{c(){s=n("meta"),u=d(),o=n("h1"),f=n("a"),h=n("span"),E(g.$$.fragment),C=d(),M=n("span"),x=r("Tour r\xE1pido"),O=d(),E(S.$$.fragment),I=d(),F=n("p"),U=r("Comece a trabalhar com \u{1F917} Transformers! 
Comece usando "),z=n("code"),_=r("pipeline()"),y=r(" para r\xE1pida infer\xEAncia e facilmente carregue um modelo pr\xE9-treinado e um tokenizer com "),D=n("a"),W=r("AutoClass"),R=r(" para resolver tarefas de texto, vis\xE3o ou \xE1udio."),oe=d(),E(Y.$$.fragment),ee=d(),J=n("h2"),V=n("a"),X=n("span"),E(Q.$$.fragment),K=d(),ue=n("span"),re=r("Pipeline"),de=d(),le=n("p"),ae=n("code"),ne=r("pipeline()"),$e=r(" \xE9 a maneira mais f\xE1cil de usar um modelo pr\xE9-treinado para uma dada tarefa."),T=d(),E(N.$$.fragment),ie=d(),v=n("p"),H=r("A "),me=n("code"),Ce=r("pipeline()"),fe=r(" apoia diversas tarefas fora da caixa:"),ge=d(),pe=n("p"),Fe=n("strong"),ve=r("Texto"),Ba=r(":"),da=d(),G=n("ul"),bt=n("li"),Zo=r("An\xE1lise sentimental: classifica a polaridade de um texto."),Xo=d(),Et=n("li"),er=r("Gera\xE7\xE3o de texto (em Ingl\xEAs): gera texto a partir de uma entrada."),ar=d(),kt=n("li"),tr=r("Reconhecimento de entidade mencionada: legenda cada palavra com uma classe que a representa (pessoa, data, local, etc\u2026)"),sr=d(),wt=n("li"),or=r("Respostas: extrai uma resposta dado algum contexto e uma quest\xE3o"),rr=d(),jt=n("li"),lr=r("M\xE1scara de preenchimento: preenche o espa\xE7o, dado um texto com m\xE1scaras de palavras."),nr=d(),At=n("li"),ir=r("Sumariza\xE7\xE3o: gera o resumo de um texto longo ou documento."),pr=d(),qt=n("li"),mr=r("Tradu\xE7\xE3o: traduz texto para outra l\xEDngua."),cr=d(),Tt=n("li"),ur=r("Extra\xE7\xE3o de caracter\xEDsticas: cria um tensor que representa o texto."),Ts=d(),$a=n("p"),xt=n("strong"),fr=r("Imagem"),dr=r(":"),xs=d(),be=n("ul"),yt=n("li"),$r=r("Classifica\xE7\xE3o de imagens: classifica uma imagem."),hr=d(),Ct=n("li"),_r=r("Segmenta\xE7\xE3o de imagem: classifica cada pixel da imagem."),gr=d(),zt=n("li"),vr=r("Detec\xE7\xE3o de objetos: detecta objetos em uma imagem."),ys=d(),ha=n("p"),Pt=n("strong"),br=r("Audio"),Er=r(":"),Cs=d(),Ne=n("ul"),Ot=n("li"),kr=r("Classfica\xE7\xE3o de \xE1udio: legenda um trecho de \xE1udio 
fornecido."),wr=d(),Mt=n("li"),jr=r("Reconhecimento de fala autom\xE1tico: transcreve audio em texto."),zs=d(),E(Ue.$$.fragment),Ps=d(),ze=n("h3"),He=n("a"),Dt=n("span"),E(_a.$$.fragment),Ar=d(),St=n("span"),qr=r("Uso da pipeline"),Os=d(),Le=n("p"),Tr=r("No exemplo a seguir, voc\xEA usar\xE1 "),It=n("code"),xr=r("pipeline()"),yr=r(" para an\xE1lise sentimental."),Ms=d(),Ya=n("p"),Cr=r("Instale as seguintes depend\xEAncias se voc\xEA ainda n\xE3o o fez:"),Ds=d(),E(Re.$$.fragment),Ss=d(),We=n("p"),zr=r("Importe "),Ft=n("code"),Pr=r("pipeline()"),Or=r(" e especifique a tarefa que deseja completar:"),Is=d(),E(ga.$$.fragment),Fs=d(),Ee=n("p"),Mr=r("A pipeline baixa and armazena um "),va=n("a"),Dr=r("modelo pr\xE9-treinado"),Sr=r(" padr\xE3o e tokenizer para an\xE1lise sentimental. Agora voc\xEA pode usar "),Nt=n("code"),Ir=r("classifier"),Fr=r(" no texto alvo:"),Ns=d(),E(ba.$$.fragment),Us=d(),Ve=n("p"),Nr=r("Para mais de uma senten\xE7a, passe uma lista para a "),Ut=n("code"),Ur=r("pipeline()"),Hr=r(", a qual retornar\xE1 uma lista de dicion\xE1rios:"),Hs=d(),E(Ea.$$.fragment),Ls=d(),ke=n("p"),Lr=r("A "),Ht=n("code"),Rr=r("pipeline()"),Wr=r(" tamb\xE9m pode iterar sobre um Dataset inteiro. Comece instalando a biblioteca de "),ka=n("a"),Vr=r("\u{1F917} Datasets"),Gr=r(":"),Rs=d(),E(wa.$$.fragment),Ws=d(),Ge=n("p"),Br=r("Crie uma "),Lt=n("code"),Yr=r("pipeline()"),Jr=r(" com a tarefa que deseja resolver e o modelo que deseja usar."),Vs=d(),E(ja.$$.fragment),Gs=d(),we=n("p"),Qr=r("A seguir, carregue uma base de dados (confira a \u{1F917} "),Aa=n("a"),Kr=r("Inicia\xE7\xE3o em Datasets"),Zr=r(" para mais detalhes) que voc\xEA gostaria de iterar sobre. 
Por exemplo, vamos carregar o dataset "),qa=n("a"),Xr=r("MInDS-14"),el=r(":"),Bs=d(),E(Ta.$$.fragment),Ys=d(),Ja=n("p"),al=r("Precisamos garantir que a taxa de amostragem do conjunto de dados corresponda \xE0 taxa de amostragem em que o facebook/wav2vec2-base-960h foi treinado."),Js=d(),E(xa.$$.fragment),Qs=d(),Be=n("p"),tl=r("Os arquivos de \xE1udio s\xE3o carregados e re-amostrados automaticamente ao chamar a coluna "),Rt=n("code"),sl=r('"audio"'),ol=r(`. Vamos extrair as arrays de formas de onda originais das primeiras 4 amostras e pass\xE1-las como uma lista para o pipeline:`),Ks=d(),E(ya.$$.fragment),Zs=d(),Ye=n("p"),rl=r("Para um conjunto de dados maior onde as entradas s\xE3o maiores (como em fala ou vis\xE3o), ser\xE1 necess\xE1rio passar um gerador em vez de uma lista que carregue todas as entradas na mem\xF3ria. Consulte a "),Qa=n("a"),ll=r("documenta\xE7\xE3o do pipeline"),nl=r(" para mais informa\xE7\xF5es."),Xs=d(),Pe=n("h3"),Je=n("a"),Wt=n("span"),E(Ca.$$.fragment),il=d(),Vt=n("span"),pl=r("Use outro modelo e tokenizer na pipeline"),eo=d(),he=n("p"),ml=r("A "),Gt=n("code"),cl=r("pipeline()"),ul=r(" pode acomodar qualquer modelo do "),za=n("a"),fl=r("Model Hub"),dl=r(", facilitando sua adapta\xE7\xE3o para outros casos de uso. Por exemplo, se voc\xEA quiser um modelo capaz de lidar com texto em franc\xEAs, use as tags no Model Hub para filtrar um modelo apropriado. O principal resultado filtrado retorna um "),Pa=n("a"),$l=r("modelo BERT"),hl=r(" bil\xEDngue ajustado para an\xE1lise de sentimentos. 
\xD3timo, vamos usar este modelo!"),ao=d(),E(Oa.$$.fragment),to=d(),E(Qe.$$.fragment),so=d(),je=n("p"),_l=r("Ent\xE3o voc\xEA pode especificar o modelo e o tokenizador na "),Bt=n("code"),gl=r("pipeline()"),vl=r(" e aplicar o "),Yt=n("code"),bl=r("classifier"),El=r(" no seu texto alvo:"),oo=d(),E(Ma.$$.fragment),ro=d(),Ae=n("p"),kl=r("Se voc\xEA n\xE3o conseguir achar um modelo para o seu caso de uso, precisar\xE1 usar fine-tune em um modelo pr\xE9-treinado nos seus dados. Veja nosso "),Ka=n("a"),wl=r("tutorial de fine-tuning"),jl=r(" para descobrir como. Finalmente, depois que voc\xEA tiver usado esse processo em seu modelo, considere compartilh\xE1-lo conosco (veja o tutorial "),Za=n("a"),Al=r("aqui"),ql=r(") na plataforma Model Hub afim de democratizar NLP! \u{1F917}"),lo=d(),Oe=n("h2"),Ke=n("a"),Jt=n("span"),E(Da.$$.fragment),Tl=d(),Qt=n("span"),xl=r("AutoClass"),no=d(),E(Sa.$$.fragment),io=d(),Z=n("p"),yl=r("Por baixo dos panos, as classes "),Kt=n("code"),Cl=r("AutoModelForSequenceClassification"),zl=r(" e "),Zt=n("code"),Pl=r("AutoTokenizer"),Ol=r(" trabalham juntas para fortificar o "),Xt=n("code"),Ml=r("pipeline()"),Dl=r(". Um "),Xa=n("a"),Sl=r("AutoClass"),Il=r(" \xE9 um atalho que automaticamente recupera a arquitetura de um modelo pr\xE9-treinado a partir de seu nome ou caminho. Basta selecionar a "),es=n("code"),Fl=r("AutoClass"),Nl=r(" apropriada para sua tarefa e seu tokenizer associado com "),as=n("code"),Ul=r("AutoTokenizer"),Hl=r("."),po=d(),qe=n("p"),Ll=r("Vamos voltar ao nosso exemplo e ver como voc\xEA pode usar a "),ts=n("code"),Rl=r("AutoClass"),Wl=r(" para replicar os resultados do "),ss=n("code"),Vl=r("pipeline()"),Gl=r("."),mo=d(),Me=n("h3"),Ze=n("a"),os=n("span"),E(Ia.$$.fragment),Bl=d(),rs=n("span"),Yl=r("AutoTokenizer"),co=d(),Te=n("p"),Jl=r("Um tokenizer \xE9 respons\xE1vel por pr\xE9-processar o texto em um formato que seja compreens\xEDvel para o modelo. 
Primeiro, o tokenizer dividir\xE1 o texto em palavras chamadas "),ls=n("em"),Ql=r("tokens"),Kl=r(". Existem v\xE1rias regras que regem o processo de tokeniza\xE7\xE3o, incluindo como dividir uma palavra e em que n\xEDvel (saiba mais sobre tokeniza\xE7\xE3o "),et=n("a"),Zl=r("aqui"),Xl=r("). A coisa mais importante a lembrar, por\xE9m, \xE9 que voc\xEA precisa instanciar o tokenizer com o mesmo nome do modelo para garantir que est\xE1 usando as mesmas regras de tokeniza\xE7\xE3o com as quais um modelo foi pr\xE9-treinado."),uo=d(),Xe=n("p"),en=r("Carregue um tokenizer com "),ns=n("code"),an=r("AutoTokenizer"),tn=r(":"),fo=d(),E(Fa.$$.fragment),$o=d(),ea=n("p"),sn=r("Em seguida, o tokenizer converte os tokens em n\xFAmeros para construir um tensor como entrada para o modelo. Isso \xE9 conhecido como o "),is=n("em"),on=r("vocabul\xE1rio"),rn=r(" do modelo."),ho=d(),at=n("p"),ln=r("Passe o texto para o tokenizer:"),_o=d(),E(Na.$$.fragment),go=d(),tt=n("p"),nn=r("O tokenizer retornar\xE1 um dicion\xE1rio contendo:"),vo=d(),aa=n("ul"),st=n("li"),ot=n("a"),pn=r("input_ids"),mn=r(": representa\xE7\xF5es num\xE9ricas de seus tokens."),cn=d(),rt=n("li"),lt=n("a"),un=r("atttention_mask"),fn=r(": indica quais tokens devem ser atendidos."),bo=d(),ta=n("p"),dn=r("Assim como o "),ps=n("code"),$n=r("pipeline()"),hn=r(", o tokenizer aceitar\xE1 uma lista de entradas. 
Al\xE9m disso, o tokenizer tamb\xE9m pode preencher e truncar o texto para retornar um lote com comprimento uniforme:"),Eo=d(),E(sa.$$.fragment),ko=d(),oa=n("p"),_n=r("Leia o tutorial de "),nt=n("a"),gn=r("pr\xE9-processamento"),vn=r(" para obter mais detalhes sobre tokeniza\xE7\xE3o."),wo=d(),De=n("h3"),ra=n("a"),ms=n("span"),E(Ua.$$.fragment),bn=d(),cs=n("span"),En=r("AutoModel"),jo=d(),E(la.$$.fragment),Ao=d(),E(na.$$.fragment),qo=d(),B=n("p"),kn=r("Os modelos s\xE3o um standard "),Ha=n("a"),us=n("code"),wn=r("torch.nn.Module"),jn=r(" ou um ["),fs=n("code"),An=r("tf.keras.Model"),qn=r("](https: //"),La=n("a"),Tn=r("www.tensorflow.org/api_docs/python/tf/keras/Model"),xn=r(") para que voc\xEA possa us\xE1-los em seu loop de treinamento habitual. No entanto, para facilitar as coisas, \u{1F917} Transformers fornece uma classe "),ds=n("code"),yn=r("Trainer"),Cn=r(" para PyTorch que adiciona funcionalidade para treinamento distribu\xEDdo, precis\xE3o mista e muito mais. Para o TensorFlow, voc\xEA pode usar o m\xE9todo "),$s=n("code"),zn=r("fit"),Pn=r(" de "),Ra=n("a"),On=r("Keras"),Mn=r(". Consulte o "),it=n("a"),Dn=r("tutorial de treinamento"),Sn=r(" para obter mais detalhes."),To=d(),E(ia.$$.fragment),xo=d(),Se=n("h3"),pa=n("a"),hs=n("span"),E(Wa.$$.fragment),In=d(),_s=n("span"),Fn=r("Salvar um modelo"),yo=d(),E(ma.$$.fragment),Co=d(),xe=n("p"),Nn=r("Um recurso particularmente interessante dos \u{1F917} Transformers \xE9 a capacidade de salvar um modelo e recarreg\xE1-lo como um modelo PyTorch ou TensorFlow. 
Use "),gs=n("code"),Un=r("from_pt"),Hn=r(" ou "),vs=n("code"),Ln=r("from_tf"),Rn=r(" para converter o modelo de um framework para outro:"),zo=d(),E(ca.$$.fragment),this.h()},l(e){const m=Pp('[data-svelte="svelte-1phssyn"]',document.head);s=i(m,"META",{name:!0,content:!0}),m.forEach(t),u=$(e),o=i(e,"H1",{class:!0});var Va=p(o);f=i(Va,"A",{id:!0,class:!0,href:!0});var bs=p(f);h=i(bs,"SPAN",{});var Es=p(h);k(g.$$.fragment,Es),Es.forEach(t),bs.forEach(t),C=$(Va),M=i(Va,"SPAN",{});var ks=p(M);x=l(ks,"Tour r\xE1pido"),ks.forEach(t),Va.forEach(t),O=$(e),k(S.$$.fragment,e),I=$(e),F=i(e,"P",{});var Ie=p(F);U=l(Ie,"Comece a trabalhar com \u{1F917} Transformers! Comece usando "),z=i(Ie,"CODE",{});var ws=p(z);_=l(ws,"pipeline()"),ws.forEach(t),y=l(Ie," para r\xE1pida infer\xEAncia e facilmente carregue um modelo pr\xE9-treinado e um tokenizer com "),D=i(Ie,"A",{href:!0});var js=p(D);W=l(js,"AutoClass"),js.forEach(t),R=l(Ie," para resolver tarefas de texto, vis\xE3o ou \xE1udio."),Ie.forEach(t),oe=$(e),k(Y.$$.fragment,e),ee=$(e),J=i(e,"H2",{class:!0});var Ga=p(J);V=i(Ga,"A",{id:!0,class:!0,href:!0});var As=p(V);X=i(As,"SPAN",{});var qs=p(X);k(Q.$$.fragment,qs),qs.forEach(t),As.forEach(t),K=$(Ga),ue=i(Ga,"SPAN",{});var Qn=p(ue);re=l(Qn,"Pipeline"),Qn.forEach(t),Ga.forEach(t),de=$(e),le=i(e,"P",{});var Wn=p(le);ae=i(Wn,"CODE",{});var Kn=p(ae);ne=l(Kn,"pipeline()"),Kn.forEach(t),$e=l(Wn," \xE9 a maneira mais f\xE1cil de usar um modelo pr\xE9-treinado para uma dada tarefa."),Wn.forEach(t),T=$(e),k(N.$$.fragment,e),ie=$(e),v=i(e,"P",{});var Oo=p(v);H=l(Oo,"A "),me=i(Oo,"CODE",{});var Zn=p(me);Ce=l(Zn,"pipeline()"),Zn.forEach(t),fe=l(Oo," apoia diversas tarefas fora da caixa:"),Oo.forEach(t),ge=$(e),pe=i(e,"P",{});var Vn=p(pe);Fe=i(Vn,"STRONG",{});var Xn=p(Fe);ve=l(Xn,"Texto"),Xn.forEach(t),Ba=l(Vn,":"),Vn.forEach(t),da=$(e),G=i(e,"UL",{});var te=p(G);bt=i(te,"LI",{});var ei=p(bt);Zo=l(ei,"An\xE1lise sentimental: classifica a polaridade de um 
texto."),ei.forEach(t),Xo=$(te),Et=i(te,"LI",{});var ai=p(Et);er=l(ai,"Gera\xE7\xE3o de texto (em Ingl\xEAs): gera texto a partir de uma entrada."),ai.forEach(t),ar=$(te),kt=i(te,"LI",{});var ti=p(kt);tr=l(ti,"Reconhecimento de entidade mencionada: legenda cada palavra com uma classe que a representa (pessoa, data, local, etc\u2026)"),ti.forEach(t),sr=$(te),wt=i(te,"LI",{});var si=p(wt);or=l(si,"Respostas: extrai uma resposta dado algum contexto e uma quest\xE3o"),si.forEach(t),rr=$(te),jt=i(te,"LI",{});var oi=p(jt);lr=l(oi,"M\xE1scara de preenchimento: preenche o espa\xE7o, dado um texto com m\xE1scaras de palavras."),oi.forEach(t),nr=$(te),At=i(te,"LI",{});var ri=p(At);ir=l(ri,"Sumariza\xE7\xE3o: gera o resumo de um texto longo ou documento."),ri.forEach(t),pr=$(te),qt=i(te,"LI",{});var li=p(qt);mr=l(li,"Tradu\xE7\xE3o: traduz texto para outra l\xEDngua."),li.forEach(t),cr=$(te),Tt=i(te,"LI",{});var ni=p(Tt);ur=l(ni,"Extra\xE7\xE3o de caracter\xEDsticas: cria um tensor que representa o texto."),ni.forEach(t),te.forEach(t),Ts=$(e),$a=i(e,"P",{});var Gn=p($a);xt=i(Gn,"STRONG",{});var ii=p(xt);fr=l(ii,"Imagem"),ii.forEach(t),dr=l(Gn,":"),Gn.forEach(t),xs=$(e),be=i(e,"UL",{});var pt=p(be);yt=i(pt,"LI",{});var pi=p(yt);$r=l(pi,"Classifica\xE7\xE3o de imagens: classifica uma imagem."),pi.forEach(t),hr=$(pt),Ct=i(pt,"LI",{});var mi=p(Ct);_r=l(mi,"Segmenta\xE7\xE3o de imagem: classifica cada pixel da imagem."),mi.forEach(t),gr=$(pt),zt=i(pt,"LI",{});var ci=p(zt);vr=l(ci,"Detec\xE7\xE3o de objetos: detecta objetos em uma imagem."),ci.forEach(t),pt.forEach(t),ys=$(e),ha=i(e,"P",{});var Bn=p(ha);Pt=i(Bn,"STRONG",{});var ui=p(Pt);br=l(ui,"Audio"),ui.forEach(t),Er=l(Bn,":"),Bn.forEach(t),Cs=$(e),Ne=i(e,"UL",{});var Mo=p(Ne);Ot=i(Mo,"LI",{});var fi=p(Ot);kr=l(fi,"Classfica\xE7\xE3o de \xE1udio: legenda um trecho de \xE1udio fornecido."),fi.forEach(t),wr=$(Mo),Mt=i(Mo,"LI",{});var di=p(Mt);jr=l(di,"Reconhecimento de fala autom\xE1tico: transcreve audio em 
texto."),di.forEach(t),Mo.forEach(t),zs=$(e),k(Ue.$$.fragment,e),Ps=$(e),ze=i(e,"H3",{class:!0});var Do=p(ze);He=i(Do,"A",{id:!0,class:!0,href:!0});var $i=p(He);Dt=i($i,"SPAN",{});var hi=p(Dt);k(_a.$$.fragment,hi),hi.forEach(t),$i.forEach(t),Ar=$(Do),St=i(Do,"SPAN",{});var _i=p(St);qr=l(_i,"Uso da pipeline"),_i.forEach(t),Do.forEach(t),Os=$(e),Le=i(e,"P",{});var So=p(Le);Tr=l(So,"No exemplo a seguir, voc\xEA usar\xE1 "),It=i(So,"CODE",{});var gi=p(It);xr=l(gi,"pipeline()"),gi.forEach(t),yr=l(So," para an\xE1lise sentimental."),So.forEach(t),Ms=$(e),Ya=i(e,"P",{});var vi=p(Ya);Cr=l(vi,"Instale as seguintes depend\xEAncias se voc\xEA ainda n\xE3o o fez:"),vi.forEach(t),Ds=$(e),k(Re.$$.fragment,e),Ss=$(e),We=i(e,"P",{});var Io=p(We);zr=l(Io,"Importe "),Ft=i(Io,"CODE",{});var bi=p(Ft);Pr=l(bi,"pipeline()"),bi.forEach(t),Or=l(Io," e especifique a tarefa que deseja completar:"),Io.forEach(t),Is=$(e),k(ga.$$.fragment,e),Fs=$(e),Ee=i(e,"P",{});var mt=p(Ee);Mr=l(mt,"A pipeline baixa and armazena um "),va=i(mt,"A",{href:!0,rel:!0});var Ei=p(va);Dr=l(Ei,"modelo pr\xE9-treinado"),Ei.forEach(t),Sr=l(mt," padr\xE3o e tokenizer para an\xE1lise sentimental. Agora voc\xEA pode usar "),Nt=i(mt,"CODE",{});var ki=p(Nt);Ir=l(ki,"classifier"),ki.forEach(t),Fr=l(mt," no texto alvo:"),mt.forEach(t),Ns=$(e),k(ba.$$.fragment,e),Us=$(e),Ve=i(e,"P",{});var Fo=p(Ve);Nr=l(Fo,"Para mais de uma senten\xE7a, passe uma lista para a "),Ut=i(Fo,"CODE",{});var wi=p(Ut);Ur=l(wi,"pipeline()"),wi.forEach(t),Hr=l(Fo,", a qual retornar\xE1 uma lista de dicion\xE1rios:"),Fo.forEach(t),Hs=$(e),k(Ea.$$.fragment,e),Ls=$(e),ke=i(e,"P",{});var ct=p(ke);Lr=l(ct,"A "),Ht=i(ct,"CODE",{});var ji=p(Ht);Rr=l(ji,"pipeline()"),ji.forEach(t),Wr=l(ct," tamb\xE9m pode iterar sobre um Dataset inteiro. 
Comece instalando a biblioteca de "),ka=i(ct,"A",{href:!0,rel:!0});var Ai=p(ka);Vr=l(Ai,"\u{1F917} Datasets"),Ai.forEach(t),Gr=l(ct,":"),ct.forEach(t),Rs=$(e),k(wa.$$.fragment,e),Ws=$(e),Ge=i(e,"P",{});var No=p(Ge);Br=l(No,"Crie uma "),Lt=i(No,"CODE",{});var qi=p(Lt);Yr=l(qi,"pipeline()"),qi.forEach(t),Jr=l(No," com a tarefa que deseja resolver e o modelo que deseja usar."),No.forEach(t),Vs=$(e),k(ja.$$.fragment,e),Gs=$(e),we=i(e,"P",{});var ut=p(we);Qr=l(ut,"A seguir, carregue uma base de dados (confira a \u{1F917} "),Aa=i(ut,"A",{href:!0,rel:!0});var Ti=p(Aa);Kr=l(Ti,"Inicia\xE7\xE3o em Datasets"),Ti.forEach(t),Zr=l(ut," para mais detalhes) que voc\xEA gostaria de iterar sobre. Por exemplo, vamos carregar o dataset "),qa=i(ut,"A",{href:!0,rel:!0});var xi=p(qa);Xr=l(xi,"MInDS-14"),xi.forEach(t),el=l(ut,":"),ut.forEach(t),Bs=$(e),k(Ta.$$.fragment,e),Ys=$(e),Ja=i(e,"P",{});var yi=p(Ja);al=l(yi,"Precisamos garantir que a taxa de amostragem do conjunto de dados corresponda \xE0 taxa de amostragem em que o facebook/wav2vec2-base-960h foi treinado."),yi.forEach(t),Js=$(e),k(xa.$$.fragment,e),Qs=$(e),Be=i(e,"P",{});var Uo=p(Be);tl=l(Uo,"Os arquivos de \xE1udio s\xE3o carregados e re-amostrados automaticamente ao chamar a coluna "),Rt=i(Uo,"CODE",{});var Ci=p(Rt);sl=l(Ci,'"audio"'),Ci.forEach(t),ol=l(Uo,`. Vamos extrair as arrays de formas de onda originais das primeiras 4 amostras e pass\xE1-las como uma lista para o pipeline:`),Uo.forEach(t),Ks=$(e),k(ya.$$.fragment,e),Zs=$(e),Ye=i(e,"P",{});var Ho=p(Ye);rl=l(Ho,"Para um conjunto de dados maior onde as entradas s\xE3o maiores (como em fala ou vis\xE3o), ser\xE1 necess\xE1rio passar um gerador em vez de uma lista que carregue todas as entradas na mem\xF3ria. 
Consulte a "),Qa=i(Ho,"A",{href:!0});var zi=p(Qa);ll=l(zi,"documenta\xE7\xE3o do pipeline"),zi.forEach(t),nl=l(Ho," para mais informa\xE7\xF5es."),Ho.forEach(t),Xs=$(e),Pe=i(e,"H3",{class:!0});var Lo=p(Pe);Je=i(Lo,"A",{id:!0,class:!0,href:!0});var Pi=p(Je);Wt=i(Pi,"SPAN",{});var Oi=p(Wt);k(Ca.$$.fragment,Oi),Oi.forEach(t),Pi.forEach(t),il=$(Lo),Vt=i(Lo,"SPAN",{});var Mi=p(Vt);pl=l(Mi,"Use outro modelo e tokenizer na pipeline"),Mi.forEach(t),Lo.forEach(t),eo=$(e),he=i(e,"P",{});var ua=p(he);ml=l(ua,"A "),Gt=i(ua,"CODE",{});var Di=p(Gt);cl=l(Di,"pipeline()"),Di.forEach(t),ul=l(ua," pode acomodar qualquer modelo do "),za=i(ua,"A",{href:!0,rel:!0});var Si=p(za);fl=l(Si,"Model Hub"),Si.forEach(t),dl=l(ua,", facilitando sua adapta\xE7\xE3o para outros casos de uso. Por exemplo, se voc\xEA quiser um modelo capaz de lidar com texto em franc\xEAs, use as tags no Model Hub para filtrar um modelo apropriado. O principal resultado filtrado retorna um "),Pa=i(ua,"A",{href:!0,rel:!0});var Ii=p(Pa);$l=l(Ii,"modelo BERT"),Ii.forEach(t),hl=l(ua," bil\xEDngue ajustado para an\xE1lise de sentimentos. \xD3timo, vamos usar este modelo!"),ua.forEach(t),ao=$(e),k(Oa.$$.fragment,e),to=$(e),k(Qe.$$.fragment,e),so=$(e),je=i(e,"P",{});var ft=p(je);_l=l(ft,"Ent\xE3o voc\xEA pode especificar o modelo e o tokenizador na "),Bt=i(ft,"CODE",{});var Fi=p(Bt);gl=l(Fi,"pipeline()"),Fi.forEach(t),vl=l(ft," e aplicar o "),Yt=i(ft,"CODE",{});var Ni=p(Yt);bl=l(Ni,"classifier"),Ni.forEach(t),El=l(ft," no seu texto alvo:"),ft.forEach(t),oo=$(e),k(Ma.$$.fragment,e),ro=$(e),Ae=i(e,"P",{});var dt=p(Ae);kl=l(dt,"Se voc\xEA n\xE3o conseguir achar um modelo para o seu caso de uso, precisar\xE1 usar fine-tune em um modelo pr\xE9-treinado nos seus dados. Veja nosso "),Ka=i(dt,"A",{href:!0});var Ui=p(Ka);wl=l(Ui,"tutorial de fine-tuning"),Ui.forEach(t),jl=l(dt," para descobrir como. 
Finalmente, depois que voc\xEA tiver usado esse processo em seu modelo, considere compartilh\xE1-lo conosco (veja o tutorial "),Za=i(dt,"A",{href:!0});var Hi=p(Za);Al=l(Hi,"aqui"),Hi.forEach(t),ql=l(dt,") na plataforma Model Hub afim de democratizar NLP! \u{1F917}"),dt.forEach(t),lo=$(e),Oe=i(e,"H2",{class:!0});var Ro=p(Oe);Ke=i(Ro,"A",{id:!0,class:!0,href:!0});var Li=p(Ke);Jt=i(Li,"SPAN",{});var Ri=p(Jt);k(Da.$$.fragment,Ri),Ri.forEach(t),Li.forEach(t),Tl=$(Ro),Qt=i(Ro,"SPAN",{});var Wi=p(Qt);xl=l(Wi,"AutoClass"),Wi.forEach(t),Ro.forEach(t),no=$(e),k(Sa.$$.fragment,e),io=$(e),Z=i(e,"P",{});var ce=p(Z);yl=l(ce,"Por baixo dos panos, as classes "),Kt=i(ce,"CODE",{});var Vi=p(Kt);Cl=l(Vi,"AutoModelForSequenceClassification"),Vi.forEach(t),zl=l(ce," e "),Zt=i(ce,"CODE",{});var Gi=p(Zt);Pl=l(Gi,"AutoTokenizer"),Gi.forEach(t),Ol=l(ce," trabalham juntas para fortificar o "),Xt=i(ce,"CODE",{});var Bi=p(Xt);Ml=l(Bi,"pipeline()"),Bi.forEach(t),Dl=l(ce,". Um "),Xa=i(ce,"A",{href:!0});var Yi=p(Xa);Sl=l(Yi,"AutoClass"),Yi.forEach(t),Il=l(ce," \xE9 um atalho que automaticamente recupera a arquitetura de um modelo pr\xE9-treinado a partir de seu nome ou caminho. 
Basta selecionar a "),es=i(ce,"CODE",{});var Ji=p(es);Fl=l(Ji,"AutoClass"),Ji.forEach(t),Nl=l(ce," apropriada para sua tarefa e seu tokenizer associado com "),as=i(ce,"CODE",{});var Qi=p(as);Ul=l(Qi,"AutoTokenizer"),Qi.forEach(t),Hl=l(ce,"."),ce.forEach(t),po=$(e),qe=i(e,"P",{});var $t=p(qe);Ll=l($t,"Vamos voltar ao nosso exemplo e ver como voc\xEA pode usar a "),ts=i($t,"CODE",{});var Ki=p(ts);Rl=l(Ki,"AutoClass"),Ki.forEach(t),Wl=l($t," para replicar os resultados do "),ss=i($t,"CODE",{});var Zi=p(ss);Vl=l(Zi,"pipeline()"),Zi.forEach(t),Gl=l($t,"."),$t.forEach(t),mo=$(e),Me=i(e,"H3",{class:!0});var Wo=p(Me);Ze=i(Wo,"A",{id:!0,class:!0,href:!0});var Xi=p(Ze);os=i(Xi,"SPAN",{});var ep=p(os);k(Ia.$$.fragment,ep),ep.forEach(t),Xi.forEach(t),Bl=$(Wo),rs=i(Wo,"SPAN",{});var ap=p(rs);Yl=l(ap,"AutoTokenizer"),ap.forEach(t),Wo.forEach(t),co=$(e),Te=i(e,"P",{});var ht=p(Te);Jl=l(ht,"Um tokenizer \xE9 respons\xE1vel por pr\xE9-processar o texto em um formato que seja compreens\xEDvel para o modelo. Primeiro, o tokenizer dividir\xE1 o texto em palavras chamadas "),ls=i(ht,"EM",{});var tp=p(ls);Ql=l(tp,"tokens"),tp.forEach(t),Kl=l(ht,". Existem v\xE1rias regras que regem o processo de tokeniza\xE7\xE3o, incluindo como dividir uma palavra e em que n\xEDvel (saiba mais sobre tokeniza\xE7\xE3o "),et=i(ht,"A",{href:!0});var sp=p(et);Zl=l(sp,"aqui"),sp.forEach(t),Xl=l(ht,"). A coisa mais importante a lembrar, por\xE9m, \xE9 que voc\xEA precisa instanciar o tokenizer com o mesmo nome do modelo para garantir que est\xE1 usando as mesmas regras de tokeniza\xE7\xE3o com as quais um modelo foi pr\xE9-treinado."),ht.forEach(t),uo=$(e),Xe=i(e,"P",{});var Vo=p(Xe);en=l(Vo,"Carregue um tokenizer com "),ns=i(Vo,"CODE",{});var op=p(ns);an=l(op,"AutoTokenizer"),op.forEach(t),tn=l(Vo,":"),Vo.forEach(t),fo=$(e),k(Fa.$$.fragment,e),$o=$(e),ea=i(e,"P",{});var Go=p(ea);sn=l(Go,"Em seguida, o tokenizer converte os tokens em n\xFAmeros para construir um tensor como entrada para o modelo. 
Isso \xE9 conhecido como o "),is=i(Go,"EM",{});var rp=p(is);on=l(rp,"vocabul\xE1rio"),rp.forEach(t),rn=l(Go," do modelo."),Go.forEach(t),ho=$(e),at=i(e,"P",{});var lp=p(at);ln=l(lp,"Passe o texto para o tokenizer:"),lp.forEach(t),_o=$(e),k(Na.$$.fragment,e),go=$(e),tt=i(e,"P",{});var np=p(tt);nn=l(np,"O tokenizer retornar\xE1 um dicion\xE1rio contendo:"),np.forEach(t),vo=$(e),aa=i(e,"UL",{});var Bo=p(aa);st=i(Bo,"LI",{});var Yn=p(st);ot=i(Yn,"A",{href:!0});var ip=p(ot);pn=l(ip,"input_ids"),ip.forEach(t),mn=l(Yn,": representa\xE7\xF5es num\xE9ricas de seus tokens."),Yn.forEach(t),cn=$(Bo),rt=i(Bo,"LI",{});var Jn=p(rt);lt=i(Jn,"A",{href:!0});var pp=p(lt);un=l(pp,"atttention_mask"),pp.forEach(t),fn=l(Jn,": indica quais tokens devem ser atendidos."),Jn.forEach(t),Bo.forEach(t),bo=$(e),ta=i(e,"P",{});var Yo=p(ta);dn=l(Yo,"Assim como o "),ps=i(Yo,"CODE",{});var mp=p(ps);$n=l(mp,"pipeline()"),mp.forEach(t),hn=l(Yo,", o tokenizer aceitar\xE1 uma lista de entradas. Al\xE9m disso, o tokenizer tamb\xE9m pode preencher e truncar o texto para retornar um lote com comprimento uniforme:"),Yo.forEach(t),Eo=$(e),k(sa.$$.fragment,e),ko=$(e),oa=i(e,"P",{});var Jo=p(oa);_n=l(Jo,"Leia o tutorial de "),nt=i(Jo,"A",{href:!0});var cp=p(nt);gn=l(cp,"pr\xE9-processamento"),cp.forEach(t),vn=l(Jo," para obter mais detalhes sobre tokeniza\xE7\xE3o."),Jo.forEach(t),wo=$(e),De=i(e,"H3",{class:!0});var Qo=p(De);ra=i(Qo,"A",{id:!0,class:!0,href:!0});var up=p(ra);ms=i(up,"SPAN",{});var fp=p(ms);k(Ua.$$.fragment,fp),fp.forEach(t),up.forEach(t),bn=$(Qo),cs=i(Qo,"SPAN",{});var dp=p(cs);En=l(dp,"AutoModel"),dp.forEach(t),Qo.forEach(t),jo=$(e),k(la.$$.fragment,e),Ao=$(e),k(na.$$.fragment,e),qo=$(e),B=i(e,"P",{});var se=p(B);kn=l(se,"Os modelos s\xE3o um standard "),Ha=i(se,"A",{href:!0,rel:!0});var $p=p(Ha);us=i($p,"CODE",{});var hp=p(us);wn=l(hp,"torch.nn.Module"),hp.forEach(t),$p.forEach(t),jn=l(se," ou um ["),fs=i(se,"CODE",{});var _p=p(fs);An=l(_p,"tf.keras.Model"),_p.forEach(t),qn=l(se,"](https: 
//"),La=i(se,"A",{href:!0,rel:!0});var gp=p(La);Tn=l(gp,"www.tensorflow.org/api_docs/python/tf/keras/Model"),gp.forEach(t),xn=l(se,") para que voc\xEA possa us\xE1-los em seu loop de treinamento habitual. No entanto, para facilitar as coisas, \u{1F917} Transformers fornece uma classe "),ds=i(se,"CODE",{});var vp=p(ds);yn=l(vp,"Trainer"),vp.forEach(t),Cn=l(se," para PyTorch que adiciona funcionalidade para treinamento distribu\xEDdo, precis\xE3o mista e muito mais. Para o TensorFlow, voc\xEA pode usar o m\xE9todo "),$s=i(se,"CODE",{});var bp=p($s);zn=l(bp,"fit"),bp.forEach(t),Pn=l(se," de "),Ra=i(se,"A",{href:!0,rel:!0});var Ep=p(Ra);On=l(Ep,"Keras"),Ep.forEach(t),Mn=l(se,". Consulte o "),it=i(se,"A",{href:!0});var kp=p(it);Dn=l(kp,"tutorial de treinamento"),kp.forEach(t),Sn=l(se," para obter mais detalhes."),se.forEach(t),To=$(e),k(ia.$$.fragment,e),xo=$(e),Se=i(e,"H3",{class:!0});var Ko=p(Se);pa=i(Ko,"A",{id:!0,class:!0,href:!0});var wp=p(pa);hs=i(wp,"SPAN",{});var jp=p(hs);k(Wa.$$.fragment,jp),jp.forEach(t),wp.forEach(t),In=$(Ko),_s=i(Ko,"SPAN",{});var Ap=p(_s);Fn=l(Ap,"Salvar um modelo"),Ap.forEach(t),Ko.forEach(t),yo=$(e),k(ma.$$.fragment,e),Co=$(e),xe=i(e,"P",{});var _t=p(xe);Nn=l(_t,"Um recurso particularmente interessante dos \u{1F917} Transformers \xE9 a capacidade de salvar um modelo e recarreg\xE1-lo como um modelo PyTorch ou TensorFlow. 
Use "),gs=i(_t,"CODE",{});var qp=p(gs);Un=l(qp,"from_pt"),qp.forEach(t),Hn=l(_t," ou "),vs=i(_t,"CODE",{});var Tp=p(vs);Ln=l(Tp,"from_tf"),Tp.forEach(t),Rn=l(_t," para converter o modelo de um framework para outro:"),_t.forEach(t),zo=$(e),k(ca.$$.fragment,e),this.h()},h(){b(s,"name","hf:doc:metadata"),b(s,"content",JSON.stringify(um)),b(f,"id","tour-rpido"),b(f,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(f,"href","#tour-rpido"),b(o,"class","relative group"),b(D,"href","./model_doc/auto"),b(V,"id","pipeline"),b(V,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(V,"href","#pipeline"),b(J,"class","relative group"),b(He,"id","uso-da-pipeline"),b(He,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(He,"href","#uso-da-pipeline"),b(ze,"class","relative group"),b(va,"href","https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english"),b(va,"rel","nofollow"),b(ka,"href","https://huggingface.co/docs/datasets/"),b(ka,"rel","nofollow"),b(Aa,"href","https://huggingface.co/docs/datasets/quickstart.html"),b(Aa,"rel","nofollow"),b(qa,"href","https://huggingface.co/datasets/PolyAI/minds14"),b(qa,"rel","nofollow"),b(Qa,"href","./main_classes/pipelines"),b(Je,"id","use-outro-modelo-e-tokenizer-na-pipeline"),b(Je,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(Je,"href","#use-outro-modelo-e-tokenizer-na-pipeline"),b(Pe,"class","relative 
group"),b(za,"href","https://huggingface.co/models"),b(za,"rel","nofollow"),b(Pa,"href","https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment"),b(Pa,"rel","nofollow"),b(Ka,"href","./training"),b(Za,"href","./model_sharing"),b(Ke,"id","autoclass"),b(Ke,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(Ke,"href","#autoclass"),b(Oe,"class","relative group"),b(Xa,"href","./model_doc/auto"),b(Ze,"id","autotokenizer"),b(Ze,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(Ze,"href","#autotokenizer"),b(Me,"class","relative group"),b(et,"href","./tokenizer_summary"),b(ot,"href","./glossary#input-ids"),b(lt,"href",".glossary#attention-mask"),b(nt,"href","./pr%C3%A9-processamento"),b(ra,"id","automodel"),b(ra,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(ra,"href","#automodel"),b(De,"class","relative group"),b(Ha,"href","https://pytorch.org/docs/stable/nn.html#torch.nn.Module"),b(Ha,"rel","nofollow"),b(La,"href","http://www.tensorflow.org/api_docs/python/tf/keras/Model"),b(La,"rel","nofollow"),b(Ra,"href","https://keras.io/"),b(Ra,"rel","nofollow"),b(it,"href","./training"),b(pa,"id","salvar-um-modelo"),b(pa,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),b(pa,"href","#salvar-um-modelo"),b(Se,"class","relative 
group")},m(e,m){a(document.head,s),c(e,u,m),c(e,o,m),a(o,f),a(f,h),w(g,h,null),a(o,C),a(o,M),a(M,x),c(e,O,m),w(S,e,m),c(e,I,m),c(e,F,m),a(F,U),a(F,z),a(z,_),a(F,y),a(F,D),a(D,W),a(F,R),c(e,oe,m),w(Y,e,m),c(e,ee,m),c(e,J,m),a(J,V),a(V,X),w(Q,X,null),a(J,K),a(J,ue),a(ue,re),c(e,de,m),c(e,le,m),a(le,ae),a(ae,ne),a(le,$e),c(e,T,m),w(N,e,m),c(e,ie,m),c(e,v,m),a(v,H),a(v,me),a(me,Ce),a(v,fe),c(e,ge,m),c(e,pe,m),a(pe,Fe),a(Fe,ve),a(pe,Ba),c(e,da,m),c(e,G,m),a(G,bt),a(bt,Zo),a(G,Xo),a(G,Et),a(Et,er),a(G,ar),a(G,kt),a(kt,tr),a(G,sr),a(G,wt),a(wt,or),a(G,rr),a(G,jt),a(jt,lr),a(G,nr),a(G,At),a(At,ir),a(G,pr),a(G,qt),a(qt,mr),a(G,cr),a(G,Tt),a(Tt,ur),c(e,Ts,m),c(e,$a,m),a($a,xt),a(xt,fr),a($a,dr),c(e,xs,m),c(e,be,m),a(be,yt),a(yt,$r),a(be,hr),a(be,Ct),a(Ct,_r),a(be,gr),a(be,zt),a(zt,vr),c(e,ys,m),c(e,ha,m),a(ha,Pt),a(Pt,br),a(ha,Er),c(e,Cs,m),c(e,Ne,m),a(Ne,Ot),a(Ot,kr),a(Ne,wr),a(Ne,Mt),a(Mt,jr),c(e,zs,m),w(Ue,e,m),c(e,Ps,m),c(e,ze,m),a(ze,He),a(He,Dt),w(_a,Dt,null),a(ze,Ar),a(ze,St),a(St,qr),c(e,Os,m),c(e,Le,m),a(Le,Tr),a(Le,It),a(It,xr),a(Le,yr),c(e,Ms,m),c(e,Ya,m),a(Ya,Cr),c(e,Ds,m),w(Re,e,m),c(e,Ss,m),c(e,We,m),a(We,zr),a(We,Ft),a(Ft,Pr),a(We,Or),c(e,Is,m),w(ga,e,m),c(e,Fs,m),c(e,Ee,m),a(Ee,Mr),a(Ee,va),a(va,Dr),a(Ee,Sr),a(Ee,Nt),a(Nt,Ir),a(Ee,Fr),c(e,Ns,m),w(ba,e,m),c(e,Us,m),c(e,Ve,m),a(Ve,Nr),a(Ve,Ut),a(Ut,Ur),a(Ve,Hr),c(e,Hs,m),w(Ea,e,m),c(e,Ls,m),c(e,ke,m),a(ke,Lr),a(ke,Ht),a(Ht,Rr),a(ke,Wr),a(ke,ka),a(ka,Vr),a(ke,Gr),c(e,Rs,m),w(wa,e,m),c(e,Ws,m),c(e,Ge,m),a(Ge,Br),a(Ge,Lt),a(Lt,Yr),a(Ge,Jr),c(e,Vs,m),w(ja,e,m),c(e,Gs,m),c(e,we,m),a(we,Qr),a(we,Aa),a(Aa,Kr),a(we,Zr),a(we,qa),a(qa,Xr),a(we,el),c(e,Bs,m),w(Ta,e,m),c(e,Ys,m),c(e,Ja,m),a(Ja,al),c(e,Js,m),w(xa,e,m),c(e,Qs,m),c(e,Be,m),a(Be,tl),a(Be,Rt),a(Rt,sl),a(Be,ol),c(e,Ks,m),w(ya,e,m),c(e,Zs,m),c(e,Ye,m),a(Ye,rl),a(Ye,Qa),a(Qa,ll),a(Ye,nl),c(e,Xs,m),c(e,Pe,m),a(Pe,Je),a(Je,Wt),w(Ca,Wt,null),a(Pe,il),a(Pe,Vt),a(Vt,pl),c(e,eo,m),c(e,he,m),a(he,ml),a(he,Gt),a(Gt,cl),a(he,ul),a(he,za),a(za,fl),a(he,dl),a(he,Pa),a(Pa,$l),
a(he,hl),c(e,ao,m),w(Oa,e,m),c(e,to,m),w(Qe,e,m),c(e,so,m),c(e,je,m),a(je,_l),a(je,Bt),a(Bt,gl),a(je,vl),a(je,Yt),a(Yt,bl),a(je,El),c(e,oo,m),w(Ma,e,m),c(e,ro,m),c(e,Ae,m),a(Ae,kl),a(Ae,Ka),a(Ka,wl),a(Ae,jl),a(Ae,Za),a(Za,Al),a(Ae,ql),c(e,lo,m),c(e,Oe,m),a(Oe,Ke),a(Ke,Jt),w(Da,Jt,null),a(Oe,Tl),a(Oe,Qt),a(Qt,xl),c(e,no,m),w(Sa,e,m),c(e,io,m),c(e,Z,m),a(Z,yl),a(Z,Kt),a(Kt,Cl),a(Z,zl),a(Z,Zt),a(Zt,Pl),a(Z,Ol),a(Z,Xt),a(Xt,Ml),a(Z,Dl),a(Z,Xa),a(Xa,Sl),a(Z,Il),a(Z,es),a(es,Fl),a(Z,Nl),a(Z,as),a(as,Ul),a(Z,Hl),c(e,po,m),c(e,qe,m),a(qe,Ll),a(qe,ts),a(ts,Rl),a(qe,Wl),a(qe,ss),a(ss,Vl),a(qe,Gl),c(e,mo,m),c(e,Me,m),a(Me,Ze),a(Ze,os),w(Ia,os,null),a(Me,Bl),a(Me,rs),a(rs,Yl),c(e,co,m),c(e,Te,m),a(Te,Jl),a(Te,ls),a(ls,Ql),a(Te,Kl),a(Te,et),a(et,Zl),a(Te,Xl),c(e,uo,m),c(e,Xe,m),a(Xe,en),a(Xe,ns),a(ns,an),a(Xe,tn),c(e,fo,m),w(Fa,e,m),c(e,$o,m),c(e,ea,m),a(ea,sn),a(ea,is),a(is,on),a(ea,rn),c(e,ho,m),c(e,at,m),a(at,ln),c(e,_o,m),w(Na,e,m),c(e,go,m),c(e,tt,m),a(tt,nn),c(e,vo,m),c(e,aa,m),a(aa,st),a(st,ot),a(ot,pn),a(st,mn),a(aa,cn),a(aa,rt),a(rt,lt),a(lt,un),a(rt,fn),c(e,bo,m),c(e,ta,m),a(ta,dn),a(ta,ps),a(ps,$n),a(ta,hn),c(e,Eo,m),w(sa,e,m),c(e,ko,m),c(e,oa,m),a(oa,_n),a(oa,nt),a(nt,gn),a(oa,vn),c(e,wo,m),c(e,De,m),a(De,ra),a(ra,ms),w(Ua,ms,null),a(De,bn),a(De,cs),a(cs,En),c(e,jo,m),w(la,e,m),c(e,Ao,m),w(na,e,m),c(e,qo,m),c(e,B,m),a(B,kn),a(B,Ha),a(Ha,us),a(us,wn),a(B,jn),a(B,fs),a(fs,An),a(B,qn),a(B,La),a(La,Tn),a(B,xn),a(B,ds),a(ds,yn),a(B,Cn),a(B,$s),a($s,zn),a(B,Pn),a(B,Ra),a(Ra,On),a(B,Mn),a(B,it),a(it,Dn),a(B,Sn),c(e,To,m),w(ia,e,m),c(e,xo,m),c(e,Se,m),a(Se,pa),a(pa,hs),w(Wa,hs,null),a(Se,In),a(Se,_s),a(_s,Fn),c(e,yo,m),w(ma,e,m),c(e,Co,m),c(e,xe,m),a(xe,Nn),a(xe,gs),a(gs,Un),a(xe,Hn),a(xe,vs),a(vs,Ln),a(xe,Rn),c(e,zo,m),w(ca,e,m),Po=!0},p(e,[m]){const Va={};m&2&&(Va.$$scope={dirty:m,ctx:e}),Y.$set(Va);const bs={};m&2&&(bs.$$scope={dirty:m,ctx:e}),Ue.$set(bs);const Es={};m&2&&(Es.$$scope={dirty:m,ctx:e}),Re.$set(Es);const 
ks={};m&2&&(ks.$$scope={dirty:m,ctx:e}),Qe.$set(ks);const Ie={};m&2&&(Ie.$$scope={dirty:m,ctx:e}),sa.$set(Ie);const ws={};m&2&&(ws.$$scope={dirty:m,ctx:e}),la.$set(ws);const js={};m&2&&(js.$$scope={dirty:m,ctx:e}),na.$set(js);const Ga={};m&2&&(Ga.$$scope={dirty:m,ctx:e}),ia.$set(Ga);const As={};m&2&&(As.$$scope={dirty:m,ctx:e}),ma.$set(As);const qs={};m&2&&(qs.$$scope={dirty:m,ctx:e}),ca.$set(qs)},i(e){Po||(j(g.$$.fragment,e),j(S.$$.fragment,e),j(Y.$$.fragment,e),j(Q.$$.fragment,e),j(N.$$.fragment,e),j(Ue.$$.fragment,e),j(_a.$$.fragment,e),j(Re.$$.fragment,e),j(ga.$$.fragment,e),j(ba.$$.fragment,e),j(Ea.$$.fragment,e),j(wa.$$.fragment,e),j(ja.$$.fragment,e),j(Ta.$$.fragment,e),j(xa.$$.fragment,e),j(ya.$$.fragment,e),j(Ca.$$.fragment,e),j(Oa.$$.fragment,e),j(Qe.$$.fragment,e),j(Ma.$$.fragment,e),j(Da.$$.fragment,e),j(Sa.$$.fragment,e),j(Ia.$$.fragment,e),j(Fa.$$.fragment,e),j(Na.$$.fragment,e),j(sa.$$.fragment,e),j(Ua.$$.fragment,e),j(la.$$.fragment,e),j(na.$$.fragment,e),j(ia.$$.fragment,e),j(Wa.$$.fragment,e),j(ma.$$.fragment,e),j(ca.$$.fragment,e),Po=!0)},o(e){A(g.$$.fragment,e),A(S.$$.fragment,e),A(Y.$$.fragment,e),A(Q.$$.fragment,e),A(N.$$.fragment,e),A(Ue.$$.fragment,e),A(_a.$$.fragment,e),A(Re.$$.fragment,e),A(ga.$$.fragment,e),A(ba.$$.fragment,e),A(Ea.$$.fragment,e),A(wa.$$.fragment,e),A(ja.$$.fragment,e),A(Ta.$$.fragment,e),A(xa.$$.fragment,e),A(ya.$$.fragment,e),A(Ca.$$.fragment,e),A(Oa.$$.fragment,e),A(Qe.$$.fragment,e),A(Ma.$$.fragment,e),A(Da.$$.fragment,e),A(Sa.$$.fragment,e),A(Ia.$$.fragment,e),A(Fa.$$.fragment,e),A(Na.$$.fragment,e),A(sa.$$.fragment,e),A(Ua.$$.fragment,e),A(la.$$.fragment,e),A(na.$$.fragment,e),A(ia.$$.fragment,e),A(Wa.$$.fragment,e),A(ma.$$.fragment,e),A(ca.$$.fragment,e),Po=!1},d(e){t(s),e&&t(u),e&&t(o),q(g),e&&t(O),q(S,e),e&&t(I),e&&t(F),e&&t(oe),q(Y,e),e&&t(ee),e&&t(J),q(Q),e&&t(de),e&&t(le),e&&t(T),q(N,e),e&&t(ie),e&&t(v),e&&t(ge),e&&t(pe),e&&t(da),e&&t(G),e&&t(Ts),e&&t($a),e&&t(xs),e&&t(be),e&&t(ys),e&&t(ha),e&&t(Cs),e&&t(Ne),e&
&t(zs),q(Ue,e),e&&t(Ps),e&&t(ze),q(_a),e&&t(Os),e&&t(Le),e&&t(Ms),e&&t(Ya),e&&t(Ds),q(Re,e),e&&t(Ss),e&&t(We),e&&t(Is),q(ga,e),e&&t(Fs),e&&t(Ee),e&&t(Ns),q(ba,e),e&&t(Us),e&&t(Ve),e&&t(Hs),q(Ea,e),e&&t(Ls),e&&t(ke),e&&t(Rs),q(wa,e),e&&t(Ws),e&&t(Ge),e&&t(Vs),q(ja,e),e&&t(Gs),e&&t(we),e&&t(Bs),q(Ta,e),e&&t(Ys),e&&t(Ja),e&&t(Js),q(xa,e),e&&t(Qs),e&&t(Be),e&&t(Ks),q(ya,e),e&&t(Zs),e&&t(Ye),e&&t(Xs),e&&t(Pe),q(Ca),e&&t(eo),e&&t(he),e&&t(ao),q(Oa,e),e&&t(to),q(Qe,e),e&&t(so),e&&t(je),e&&t(oo),q(Ma,e),e&&t(ro),e&&t(Ae),e&&t(lo),e&&t(Oe),q(Da),e&&t(no),q(Sa,e),e&&t(io),e&&t(Z),e&&t(po),e&&t(qe),e&&t(mo),e&&t(Me),q(Ia),e&&t(co),e&&t(Te),e&&t(uo),e&&t(Xe),e&&t(fo),q(Fa,e),e&&t($o),e&&t(ea),e&&t(ho),e&&t(at),e&&t(_o),q(Na,e),e&&t(go),e&&t(tt),e&&t(vo),e&&t(aa),e&&t(bo),e&&t(ta),e&&t(Eo),q(sa,e),e&&t(ko),e&&t(oa),e&&t(wo),e&&t(De),q(Ua),e&&t(jo),q(la,e),e&&t(Ao),q(na,e),e&&t(qo),e&&t(B),e&&t(To),q(ia,e),e&&t(xo),e&&t(Se),q(Wa),e&&t(yo),q(ma,e),e&&t(Co),e&&t(xe),e&&t(zo),q(ca,e)}}}const um={local:"tour-rpido",sections:[{local:"pipeline",sections:[{local:"uso-da-pipeline",title:"Uso da pipeline"},{local:"use-outro-modelo-e-tokenizer-na-pipeline",title:"Use outro modelo e tokenizer na pipeline"}],title:"Pipeline"},{local:"autoclass",sections:[{local:"autotokenizer",title:"AutoTokenizer"},{local:"automodel",title:"AutoModel"},{local:"salvar-um-modelo",title:"Salvar um modelo"}],title:"AutoClass"}],title:"Tour r\xE1pido"};function fm(P){return Op(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Em extends yp{constructor(s){super();Cp(this,s,fm,cm,zp,{})}}export{Em as default,um as metadata};
485
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/training.mdx-hf-doc-builder.js
import{S as Nm,i as Im,s as Lm,e as o,k as m,w as u,t as i,M as Mm,c as r,d as a,m as c,a as n,x as f,h as p,b as d,G as s,g as l,y as h,q as g,o as _,B as v,v as Hm}from"../chunks/vendor-hf-doc-builder.js";import{T as Mi}from"../chunks/Tip-hf-doc-builder.js";import{Y as Kr}from"../chunks/Youtube-hf-doc-builder.js";import{I as T}from"../chunks/IconCopyLink-hf-doc-builder.js";import{C as j}from"../chunks/CodeBlock-hf-doc-builder.js";import{D as Rm}from"../chunks/DocNotebookDropdown-hf-doc-builder.js";function Bm(oe){let $,y;return{c(){$=o("p"),y=i(`Voc\xEA ver\xE1 um alerta sobre alguns pesos pr\xE9-treinados que n\xE3o est\xE3o sendo utilizados e que alguns pesos est\xE3o sendo inicializados aleatoriamente. N\xE3o se preocupe, essa mensagem \xE9 completamente normal. O header/cabe\xE7\xE1rio pr\xE9-treinado do modelo BERT \xE9 descartado e substitui-se por um header de classifica\xE7\xE3o inicializado aleatoriamente. Assim, pode aplicar o fine-tuning a este novo header do modelo em sua tarefa de classifica\xE7\xE3o de sequ\xEAncias fazendo um transfer learning do modelo pr\xE9-treinado.`)},l(b){$=r(b,"P",{});var k=n($);y=p(k,`Voc\xEA ver\xE1 um alerta sobre alguns pesos pr\xE9-treinados que n\xE3o est\xE3o sendo utilizados e que alguns pesos est\xE3o sendo inicializados aleatoriamente. N\xE3o se preocupe, essa mensagem \xE9 completamente normal. O header/cabe\xE7\xE1rio pr\xE9-treinado do modelo BERT \xE9 descartado e substitui-se por um header de classifica\xE7\xE3o inicializado aleatoriamente. 
Assim, pode aplicar o fine-tuning a este novo header do modelo em sua tarefa de classifica\xE7\xE3o de sequ\xEAncias fazendo um transfer learning do modelo pr\xE9-treinado.`),k.forEach(a)},m(b,k){l(b,$,k),s($,y)},d(b){b&&a($)}}}function Km(oe){let $,y,b,k,P,E,O,z;return{c(){$=o("p"),y=i("O "),b=o("code"),k=i("Trainer"),P=i(" utiliza "),E=o("code"),O=i("DataCollatorWithPadding"),z=i(` por padr\xE3o, ent\xE3o voc\xEA n\xE3o precisa especificar explicitamente um colador de dados (data collator).`)},l(A){$=r(A,"P",{});var w=n($);y=p(w,"O "),b=r(w,"CODE",{});var C=n(b);k=p(C,"Trainer"),C.forEach(a),P=p(w," utiliza "),E=r(w,"CODE",{});var F=n(E);O=p(F,"DataCollatorWithPadding"),F.forEach(a),z=p(w,` por padr\xE3o, ent\xE3o voc\xEA n\xE3o precisa especificar explicitamente um colador de dados (data collator).`),w.forEach(a)},m(A,w){l(A,$,w),s($,y),s($,b),s(b,k),s($,P),s($,E),s(E,O),s($,z)},d(A){A&&a($)}}}function Um(oe){let $,y,b,k,P,E,O,z;return{c(){$=o("p"),y=i(`Se necess\xE1rio, voc\xEA pode obter o acesso gratuito a uma GPU na n\xFAvem por meio de um notebook no `),b=o("a"),k=i("Colaboratory"),P=i(" ou "),E=o("a"),O=i("SageMaker StudioLab"),z=i(` se n\xE3o tiver esse recurso de forma local.`),this.h()},l(A){$=r(A,"P",{});var w=n($);y=p(w,`Se necess\xE1rio, voc\xEA pode obter o acesso gratuito a uma GPU na n\xFAvem por meio de um notebook no `),b=r(w,"A",{href:!0,rel:!0});var C=n(b);k=p(C,"Colaboratory"),C.forEach(a),P=p(w," ou "),E=r(w,"A",{href:!0,rel:!0});var F=n(E);O=p(F,"SageMaker StudioLab"),F.forEach(a),z=p(w,` se n\xE3o tiver esse recurso de forma local.`),w.forEach(a),this.h()},h(){d(b,"href","https://colab.research.google.com/"),d(b,"rel","nofollow"),d(E,"href","https://studiolab.sagemaker.aws/"),d(E,"rel","nofollow")},m(A,w){l(A,$,w),s($,y),s($,b),s(b,k),s($,P),s($,E),s(E,O),s($,z)},d(A){A&&a($)}}}function Wm(oe){let 
$,y,b,k,P,E,O,z,A,w,C,F,Ga,Ur,xt,N,Le,Wr,bs,Gr,Yr,Qr,js,Vr,Jr,ws,Xr,St,Ya,Ot,B,re,ks,Me,Zr,Es,en,Ft,He,Nt,Qa,an,It,ne,sn,Re,tn,on,Lt,Be,Mt,le,rn,Ke,ys,nn,ln,Ht,Ue,Rt,Va,pn,Bt,We,Kt,Ja,Ut,K,ie,qs,Ge,mn,Xa,cn,As,dn,Wt,Ye,Gt,I,un,Ts,fn,hn,Ps,gn,_n,Yt,pe,vn,Qe,$n,bn,Qt,Ve,Vt,me,Jt,U,ce,zs,Je,jn,Cs,wn,Xt,L,kn,Ds,En,yn,Xe,qn,An,Zt,Za,Tn,eo,Ze,ao,W,de,xs,ea,Pn,Ss,zn,so,q,Cn,Os,Dn,xn,Fs,Sn,On,aa,Ns,Fn,Nn,Is,In,Ln,sa,Mn,Hn,to,ta,oo,D,Rn,Ls,Bn,Kn,Ms,Un,Wn,Hs,Gn,Yn,ro,oa,no,ue,Qn,Rs,Vn,Jn,lo,ra,io,G,fe,Bs,na,Xn,Ks,Zn,po,he,el,Us,al,sl,mo,la,co,ge,tl,Ws,ol,rl,uo,ia,fo,es,ho,Y,_e,Gs,pa,nl,Ys,ll,go,ma,_o,as,il,vo,Q,ve,Qs,ca,pl,Vs,ml,$o,M,cl,Js,dl,ul,Xs,fl,hl,bo,da,jo,$e,wo,x,gl,ua,Zs,_l,vl,et,$l,bl,at,jl,wl,ko,fa,Eo,V,be,st,ha,kl,tt,El,yo,ss,yl,qo,ga,Ao,je,ql,_a,ot,Al,Tl,To,va,Po,ts,zo,J,we,rt,$a,Pl,nt,zl,Co,ba,Do,ke,Cl,lt,Dl,xl,xo,os,Sl,So,ja,Oo,Ee,Ol,it,Fl,Nl,Fo,H,wa,ka,Il,pt,Ll,Ml,Hl,Ea,Rl,ya,X,Bl,mt,Kl,Ul,ct,Wl,Gl,Yl,qa,Ql,Aa,dt,Vl,Jl,Ta,No,rs,Xl,Io,Pa,Lo,Z,ye,ut,za,Zl,ft,ei,Mo,qe,ai,ht,si,ti,Ho,Ca,Ro,ns,oi,Bo,Da,Ko,ee,Ae,gt,xa,ri,_t,ni,Uo,Te,li,Sa,vt,ii,pi,Wo,Oa,Go,Pe,mi,$t,ci,di,Yo,Fa,Qo,ze,ui,bt,fi,hi,Vo,Na,Jo,Ce,Xo,ls,gi,Zo,ae,De,jt,Ia,_i,wt,vi,er,xe,$i,La,bi,ji,ar,Ma,sr,se,Se,kt,Ha,wi,Et,ki,tr,R,Ei,yt,yi,qi,Ra,qt,Ai,Ti,or,Ba,rr,is,nr,te,Oe,At,Ka,Pi,Tt,zi,lr,ps,Ci,ir,Fe,Pt,ms,Ua,Di,xi,Si,zt,cs,ds,Oi,Fi,pr;return E=new T({}),C=new Rm({props:{classNames:"absolute z-10 right-0 
top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/pt/training.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/pt/pytorch/training.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/pt/tensorflow/training.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/pt/training.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/pt/pytorch/training.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/pt/tensorflow/training.ipynb"}]}}),Me=new T({}),He=new Kr({props:{id:"_BZearw7f0w"}}),Be=new j({props:{code:`from datasets import load_dataset dataset = load_dataset("yelp_review_full") dataset[100]`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;yelp_review_full&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>dataset[<span class="hljs-number">100</span>] {<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&#x27;text&#x27;</span>: <span class="hljs-string">&#x27;My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\\\nThe cashier took my friends\\&#x27;s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\\&#x27;s meal. 
After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\\\&quot;serving off their orders\\\\&quot; when they didn\\&#x27;t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\\\nThe manager was rude when giving me my order. She didn\\&#x27;t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\\\nI\\&#x27;ve eaten at various McDonalds restaurants for over 30 years. I\\&#x27;ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!&#x27;</span>}`}}),Ue=new j({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") def tokenize_function(examples): return tokenizer(examples["text"], padding="max_length", truncation=True) tokenized_datasets = dataset.map(tokenize_function, batched=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">tokenize_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> tokenizer(examples[<span class="hljs-string">&quot;text&quot;</span>], padding=<span class="hljs-string">&quot;max_length&quot;</span>, truncation=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets = dataset.<span class="hljs-built_in">map</span>(tokenize_function, batched=<span class="hljs-literal">True</span>)`}}),We=new j({props:{code:`small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000)) small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>small_train_dataset = tokenized_datasets[<span class="hljs-string">&quot;train&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span class="hljs-built_in">range</span>(<span class="hljs-number">1000</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>small_eval_dataset = tokenized_datasets[<span class="hljs-string">&quot;test&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span class="hljs-built_in">range</span>(<span class="hljs-number">1000</span>))`}}),Ge=new T({}),Ye=new Kr({props:{id:"nvBXf7s7vTI"}}),Ve=new j({props:{code:`from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, num_labels=<span class="hljs-number">5</span>)`}}),me=new Mi({props:{$$slots:{default:[Bm]},$$scope:{ctx:oe}}}),Je=new T({}),Ze=new j({props:{code:`from transformers import TrainingArguments training_args = 
TrainingArguments(output_dir="test_trainer")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrainingArguments <span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments(output_dir=<span class="hljs-string">&quot;test_trainer&quot;</span>)`}}),ea=new T({}),ta=new j({props:{code:`import numpy as np from datasets import load_metric metric = load_metric("accuracy")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_metric <span class="hljs-meta">&gt;&gt;&gt; </span>metric = load_metric(<span class="hljs-string">&quot;accuracy&quot;</span>)`}}),oa=new j({props:{code:`def compute_metrics(eval_pred): logits, labels = eval_pred predictions = np.argmax(logits, axis=-1) return metric.compute(predictions=predictions, references=labels)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">compute_metrics</span>(<span class="hljs-params">eval_pred</span>): <span class="hljs-meta">... </span> logits, labels = eval_pred <span class="hljs-meta">... </span> predictions = np.argmax(logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> metric.compute(predictions=predictions, references=labels)`}}),ra=new j({props:{code:`from transformers import TrainingArguments training_args = TrainingArguments(output_dir="test_trainer", evaluation_strategy="epoch")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TrainingArguments <span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments(output_dir=<span class="hljs-string">&quot;test_trainer&quot;</span>, evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>)`}}),na=new T({}),la=new j({props:{code:`trainer = Trainer( model=model, args=training_args, train_dataset=small_train_dataset, eval_dataset=small_eval_dataset, compute_metrics=compute_metrics, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=small_train_dataset, <span class="hljs-meta">... </span> eval_dataset=small_eval_dataset, <span class="hljs-meta">... </span> compute_metrics=compute_metrics, <span class="hljs-meta">... 
</span>)`}}),ia=new j({props:{code:"trainer.train()",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()'}}),pa=new T({}),ma=new Kr({props:{id:"rnTGBy2ax1c"}}),ca=new T({}),da=new j({props:{code:`from transformers import DefaultDataCollator data_collator = DefaultDataCollator(return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DefaultDataCollator <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DefaultDataCollator(return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),$e=new Mi({props:{$$slots:{default:[Km]},$$scope:{ctx:oe}}}),fa=new j({props:{code:`tf_train_dataset = small_train_dataset.to_tf_dataset( columns=["attention_mask", "input_ids", "token_type_ids"], label_cols=["labels"], shuffle=True, collate_fn=data_collator, batch_size=8, ) tf_validation_dataset = small_eval_dataset.to_tf_dataset( columns=["attention_mask", "input_ids", "token_type_ids"], label_cols=["labels"], shuffle=False, collate_fn=data_collator, batch_size=8, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_dataset = small_train_dataset.to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;token_type_ids&quot;</span>], <span class="hljs-meta">... </span> label_cols=[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">8</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_dataset = small_eval_dataset.to_tf_dataset( <span class="hljs-meta">... 
</span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;token_type_ids&quot;</span>], <span class="hljs-meta">... </span> label_cols=[<span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">8</span>, <span class="hljs-meta">... </span>)`}}),ha=new T({}),ga=new j({props:{code:`import tensorflow as tf from transformers import TFAutoModelForSequenceClassification model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, num_labels=<span class="hljs-number">5</span>)`}}),va=new j({props:{code:`model.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=5e-5), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=tf.metrics.SparseCategoricalAccuracy(), ) model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>( <span class="hljs-meta">... </span> optimizer=tf.keras.optimizers.Adam(learning_rate=<span class="hljs-number">5e-5</span>), <span class="hljs-meta">... 
</span> loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=<span class="hljs-literal">True</span>), <span class="hljs-meta">... </span> metrics=tf.metrics.SparseCategoricalAccuracy(), <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=<span class="hljs-number">3</span>)`}}),$a=new T({}),ba=new Kr({props:{id:"Dh9CL8fyG80"}}),ja=new j({props:{code:`del model del pytorch_model del trainer torch.cuda.empty_cache()`,highlighted:`<span class="hljs-keyword">del</span> model <span class="hljs-keyword">del</span> pytorch_model <span class="hljs-keyword">del</span> trainer torch.cuda.empty_cache()`}}),Ea=new j({props:{code:'tokenized_datasets = tokenized_datasets.remove_columns(["text"])',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets = tokenized_datasets.remove_columns([<span class="hljs-string">&quot;text&quot;</span>])'}}),qa=new j({props:{code:'tokenized_datasets = tokenized_datasets.rename_column("label", "labels")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets = tokenized_datasets.rename_column(<span class="hljs-string">&quot;label&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>)'}}),Ta=new j({props:{code:'tokenized_datasets.set_format("torch")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_datasets.set_format(<span class="hljs-string">&quot;torch&quot;</span>)'}}),Pa=new j({props:{code:`small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000)) small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>small_train_dataset = tokenized_datasets[<span class="hljs-string">&quot;train&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span class="hljs-built_in">range</span>(<span class="hljs-number">1000</span>)) <span 
class="hljs-meta">&gt;&gt;&gt; </span>small_eval_dataset = tokenized_datasets[<span class="hljs-string">&quot;test&quot;</span>].shuffle(seed=<span class="hljs-number">42</span>).select(<span class="hljs-built_in">range</span>(<span class="hljs-number">1000</span>))`}}),za=new T({}),Ca=new j({props:{code:`from torch.utils.data import DataLoader train_dataloader = DataLoader(small_train_dataset, shuffle=True, batch_size=8) eval_dataloader = DataLoader(small_eval_dataset, batch_size=8)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torch.utils.data <span class="hljs-keyword">import</span> DataLoader <span class="hljs-meta">&gt;&gt;&gt; </span>train_dataloader = DataLoader(small_train_dataset, shuffle=<span class="hljs-literal">True</span>, batch_size=<span class="hljs-number">8</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>eval_dataloader = DataLoader(small_eval_dataset, batch_size=<span class="hljs-number">8</span>)`}}),Da=new j({props:{code:`from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;bert-base-cased&quot;</span>, num_labels=<span class="hljs-number">5</span>)`}}),xa=new T({}),Oa=new j({props:{code:`from torch.optim import AdamW optimizer = AdamW(model.parameters(), lr=5e-5)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torch.optim <span class="hljs-keyword">import</span> AdamW <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer = AdamW(model.parameters(), lr=<span class="hljs-number">5e-5</span>)`}}),Fa=new j({props:{code:`from 
transformers import get_scheduler num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( name="linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> get_scheduler <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_training_steps = num_epochs * <span class="hljs-built_in">len</span>(train_dataloader) <span class="hljs-meta">&gt;&gt;&gt; </span>lr_scheduler = get_scheduler( <span class="hljs-meta">... </span> name=<span class="hljs-string">&quot;linear&quot;</span>, optimizer=optimizer, num_warmup_steps=<span class="hljs-number">0</span>, num_training_steps=num_training_steps <span class="hljs-meta">... </span>)`}}),Na=new j({props:{code:`import torch device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model.to(device)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>device = torch.device(<span class="hljs-string">&quot;cuda&quot;</span>) <span class="hljs-keyword">if</span> torch.cuda.is_available() <span class="hljs-keyword">else</span> torch.device(<span class="hljs-string">&quot;cpu&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.to(device)`}}),Ce=new Mi({props:{$$slots:{default:[Um]},$$scope:{ctx:oe}}}),Ia=new T({}),Ma=new j({props:{code:`from tqdm.auto import tqdm progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1)`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> tqdm.auto <span class="hljs-keyword">import</span> tqdm <span class="hljs-meta">&gt;&gt;&gt; </span>progress_bar = tqdm(<span class="hljs-built_in">range</span>(num_training_steps)) <span class="hljs-meta">&gt;&gt;&gt; </span>model.train() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> epoch <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_epochs): <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_dataloader: <span class="hljs-meta">... </span> batch = {k: v.to(device) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> batch.items()} <span class="hljs-meta">... </span> outputs = model(**batch) <span class="hljs-meta">... </span> loss = outputs.loss <span class="hljs-meta">... </span> loss.backward() <span class="hljs-meta">... </span> optimizer.step() <span class="hljs-meta">... </span> lr_scheduler.step() <span class="hljs-meta">... </span> optimizer.zero_grad() <span class="hljs-meta">... </span> progress_bar.update(<span class="hljs-number">1</span>)`}}),Ha=new T({}),Ba=new j({props:{code:`metric = load_metric("accuracy") model.eval() for batch in eval_dataloader: batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) logits = outputs.logits predictions = torch.argmax(logits, dim=-1) metric.add_batch(predictions=predictions, references=batch["labels"]) metric.compute()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>metric = load_metric(<span class="hljs-string">&quot;accuracy&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">eval</span>() <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> eval_dataloader: <span class="hljs-meta">... 
</span> batch = {k: v.to(device) <span class="hljs-keyword">for</span> k, v <span class="hljs-keyword">in</span> batch.items()} <span class="hljs-meta">... </span> <span class="hljs-keyword">with</span> torch.no_grad(): <span class="hljs-meta">... </span> outputs = model(**batch) <span class="hljs-meta">... </span> logits = outputs.logits <span class="hljs-meta">... </span> predictions = torch.argmax(logits, dim=-<span class="hljs-number">1</span>) <span class="hljs-meta">... </span> metric.add_batch(predictions=predictions, references=batch[<span class="hljs-string">&quot;labels&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>metric.compute()`}}),Ka=new T({}),{c(){$=o("meta"),y=m(),b=o("h1"),k=o("a"),P=o("span"),u(E.$$.fragment),O=m(),z=o("span"),A=i("Fine-tuning de um modelo pr\xE9-treinado"),w=m(),u(C.$$.fragment),F=m(),Ga=o("p"),Ur=i(`O uso de um modelo pr\xE9-treinado tem importantes vantagens. Redu\xE7\xE3o do custo computacional, a pegada de carbono, e te permite utilizar modelos de \xFAltima gera\xE7\xE3o sem ter que treinar um novo desde o in\xEDcio. O \u{1F917} Transformers proporciona acesso a milhares de modelos pr\xE9-treinados numa ampla gama de tarefas. Quando utilizar um modelo pr\xE9-treinado, treine-o com um dataset espec\xEDfico para a sua tarefa. Isto \xE9 chamado de fine-tuning, uma t\xE9cnica de treinamento incrivelmente poderosa. 
Neste tutorial faremos o fine-tuning de um modelo pr\xE9-treinado com um framework de Deep Learning da sua escolha:`),xt=m(),N=o("ul"),Le=o("li"),Wr=i("Fine-tuning de um modelo pr\xE9-treinado com o \u{1F917} Transformers "),bs=o("code"),Gr=i("Trainer"),Yr=i("."),Qr=m(),js=o("li"),Vr=i("Fine-tuning de um modelo pr\xE9-treinado no TensorFlow com o Keras."),Jr=m(),ws=o("li"),Xr=i("Fine-tuning de um modelo pr\xE9-treinado em PyTorch nativo."),St=m(),Ya=o("a"),Ot=m(),B=o("h2"),re=o("a"),ks=o("span"),u(Me.$$.fragment),Zr=m(),Es=o("span"),en=i("Preparando um dataset"),Ft=m(),u(He.$$.fragment),Nt=m(),Qa=o("p"),an=i(`Antes de aplicar o fine-tuning a um modelo pr\xE9-treinado, baixe um dataset e prepare-o para o treinamento. O tutorial anterior ensinar\xE1 a processar os dados para o treinamento, e ent\xE3o poder\xE1 ter a oportunidade de testar esse novo conhecimento em algo pr\xE1tico.`),It=m(),ne=o("p"),sn=i("Comece carregando o dataset "),Re=o("a"),tn=i("Yelp Reviews"),on=i(":"),Lt=m(),u(Be.$$.fragment),Mt=m(),le=o("p"),rn=i(`Como j\xE1 sabe, \xE9 necess\xE1rio ter um tokenizador para processar o texto e incluir uma estrat\xE9gia de padding e truncamento, para manejar qualquer tamanho var\xEDavel de sequ\xEAncia. 
Para processar o seu dataset em apenas um passo, utilize o m\xE9todo de \u{1F917} Datasets `),Ke=o("a"),ys=o("code"),nn=i("map"),ln=i(` para aplicar uma fun\xE7\xE3o de preprocessamento sobre todo o dataset.`),Ht=m(),u(Ue.$$.fragment),Rt=m(),Va=o("p"),pn=i("Se desejar, \xE9 poss\xEDvel criar um subconjunto menor do dataset completo para aplicar o fine-tuning e assim reduzir o tempo necess\xE1rio."),Bt=m(),u(We.$$.fragment),Kt=m(),Ja=o("a"),Ut=m(),K=o("h2"),ie=o("a"),qs=o("span"),u(Ge.$$.fragment),mn=m(),Xa=o("span"),cn=i("Fine-tuning com o "),As=o("code"),dn=i("Trainer"),Wt=m(),u(Ye.$$.fragment),Gt=m(),I=o("p"),un=i("O \u{1F917} Transformers proporciona uma classe "),Ts=o("code"),fn=i("Trainer"),hn=i(` otimizada para o treinamento de modelos de \u{1F917} Transformers, facilitando os primeiros passos do treinamento sem a necessidade de escrever manualmente o seu pr\xF3prio ciclo. A API do `),Ps=o("code"),gn=i("Trainer"),_n=i(` suporta um grande conjunto de op\xE7\xF5es de treinamento e funcionalidades, como o logging, o gradient accumulation e o mixed precision.`),Yt=m(),pe=o("p"),vn=i(`Comece carregando seu modelo e especifique o n\xFAmero de labels de previs\xE3o. A partir do `),Qe=o("a"),$n=i("Card Dataset"),bn=i(` do Yelp Reveiw, que ja sabemos ter 5 labels usamos o seguinte c\xF3digo:`),Qt=m(),u(Ve.$$.fragment),Vt=m(),u(me.$$.fragment),Jt=m(),U=o("h3"),ce=o("a"),zs=o("span"),u(Je.$$.fragment),jn=m(),Cs=o("span"),wn=i("Hiperpar\xE2metros de treinamento"),Xt=m(),L=o("p"),kn=i("Em seguida, crie uma classe "),Ds=o("code"),En=i("TrainingArguments"),yn=i(` que contenha todos os hiperpar\xE2metros que possam ser ajustados, assim como os indicadores para ativar as diferentes op\xE7\xF5es de treinamento. 
Para este tutorial, voc\xEA pode come\xE7ar o treinamento usando os `),Xe=o("a"),qn=i("hiperpar\xE1metros"),An=i(` padr\xE3o, por\xE9m, sinta-se livre para experimentar com eles e encontrar uma configura\xE7\xE3o \xF3tima.`),Zt=m(),Za=o("p"),Tn=i("Especifique onde salvar os checkpoints do treinamento:"),eo=m(),u(Ze.$$.fragment),ao=m(),W=o("h3"),de=o("a"),xs=o("span"),u(ea.$$.fragment),Pn=m(),Ss=o("span"),zn=i("M\xE9tricas"),so=m(),q=o("p"),Cn=i("O "),Os=o("code"),Dn=i("Trainer"),xn=i(` n\xE3o avalia automaticamente o rendimento do modelo durante o treinamento. Ser\xE1 necess\xE1rio passar ao `),Fs=o("code"),Sn=i("Trainer"),On=i(` uma fun\xE7\xE3o para calcular e fazer um diagn\xF3stico sobre as m\xE9tricas. A biblioteca \u{1F917} Datasets proporciona uma fun\xE7\xE3o de `),aa=o("a"),Ns=o("code"),Fn=i("accuracy"),Nn=i(` simples que pode ser carregada com a fun\xE7\xE3o `),Is=o("code"),In=i("load_metric"),Ln=i(" (ver este "),sa=o("a"),Mn=i("tutorial"),Hn=i(" para mais informa\xE7\xF5es):"),to=m(),u(ta.$$.fragment),oo=m(),D=o("p"),Rn=i("Defina a fun\xE7\xE3o "),Ls=o("code"),Bn=i("compute"),Kn=i(" dentro de "),Ms=o("code"),Un=i("metric"),Wn=i(` para calcular a precis\xE3o das suas predi\xE7\xF5es. 
Antes de passar as suas predi\xE7\xF5es ao `),Hs=o("code"),Gn=i("compute"),Yn=i(`, \xE9 necess\xE1rio converter as predi\xE7\xF5es \xE0 logits (lembre-se que todos os modelos de \u{1F917} Transformers retornam logits).`),ro=m(),u(oa.$$.fragment),no=m(),ue=o("p"),Qn=i("Se quiser controlar as suas m\xE9tricas de avalia\xE7\xE3o durante o fine-tuning, especifique o par\xE2metro "),Rs=o("code"),Vn=i("evaluation_strategy"),Jn=i(` nos seus argumentos de treinamento para que o modelo considere a m\xE9trica de avalia\xE7\xE3o ao final de cada \xE9poca:`),lo=m(),u(ra.$$.fragment),io=m(),G=o("h3"),fe=o("a"),Bs=o("span"),u(na.$$.fragment),Xn=m(),Ks=o("span"),Zn=i("Trainer"),po=m(),he=o("p"),el=i("Crie um objeto "),Us=o("code"),al=i("Trainer"),sl=i(" com o seu modelo, argumentos de treinamento, conjuntos de dados de treinamento e de teste, e a sua fun\xE7\xE3o de avalia\xE7\xE3o:"),mo=m(),u(la.$$.fragment),co=m(),ge=o("p"),tl=i("Em seguida, aplique o fine-tuning a seu modelo chamado "),Ws=o("code"),ol=i("train()"),rl=i(":"),uo=m(),u(ia.$$.fragment),fo=m(),es=o("a"),ho=m(),Y=o("h2"),_e=o("a"),Gs=o("span"),u(pa.$$.fragment),nl=m(),Ys=o("span"),ll=i("Fine-tuning com Keras"),go=m(),u(ma.$$.fragment),_o=m(),as=o("p"),il=i(`Os modelos de \u{1F917} Transformers tamb\xE9m permitem realizar o treinamento com o TensorFlow com a API do Keras. Contudo, ser\xE1 necess\xE1rio fazer algumas mudan\xE7as antes de realizar o fine-tuning.`),vo=m(),Q=o("h3"),ve=o("a"),Qs=o("span"),u(ca.$$.fragment),pl=m(),Vs=o("span"),ml=i("Convers\xE3o do dataset ao formato do TensorFlow"),$o=m(),M=o("p"),cl=i("O "),Js=o("code"),dl=i("DefaultDataCollator"),ul=i(` junta os tensores em um batch para que o modelo possa ser treinado em cima deles. 
Assegure-se de especificar os `),Xs=o("code"),fl=i("return_tensors"),hl=i(" para retornar os tensores do TensorFlow:"),bo=m(),u(da.$$.fragment),jo=m(),u($e.$$.fragment),wo=m(),x=o("p"),gl=i(`Em seguida, converta os datasets tokenizados em datasets do TensorFlow com o m\xE9todo `),ua=o("a"),Zs=o("code"),_l=i("to_tf_dataset"),vl=i(`. Especifique suas entradas em `),et=o("code"),$l=i("columns"),bl=i(" e seu r\xF3tulo em "),at=o("code"),jl=i("label_cols"),wl=i(":"),ko=m(),u(fa.$$.fragment),Eo=m(),V=o("h3"),be=o("a"),st=o("span"),u(ha.$$.fragment),kl=m(),tt=o("span"),El=i("Compila\xE7\xE3o e ajustes"),yo=m(),ss=o("p"),yl=i("Carregue um modelo do TensorFlow com o n\xFAmero esperado de r\xF3tulos:"),qo=m(),u(ga.$$.fragment),Ao=m(),je=o("p"),ql=i("A seguir, compile e ajuste o fine-tuning a seu modelo com "),_a=o("a"),ot=o("code"),Al=i("fit"),Tl=i(` como faria com qualquer outro modelo do Keras:`),To=m(),u(va.$$.fragment),Po=m(),ts=o("a"),zo=m(),J=o("h2"),we=o("a"),rt=o("span"),u($a.$$.fragment),Pl=m(),nt=o("span"),zl=i("Fine-tune em PyTorch nativo"),Co=m(),u(ba.$$.fragment),Do=m(),ke=o("p"),Cl=i("O "),lt=o("code"),Dl=i("Trainer"),xl=i(` se encarrega do ciclo de treinamento e permite aplicar o fine-tuning a um modelo em uma linha de c\xF3digo apenas. 
Para os usu\xE1rios que preferirem escrever o seu pr\xF3prio ciclo de treinamento, tamb\xE9m \xE9 poss\xEDvel aplicar o fine-tuning a um modelo de \u{1F917} Transformers em PyTorch nativo.`),xo=m(),os=o("p"),Sl=i(`Neste momento, talvez ocorra a necessidade de reinicar seu notebook ou executar a seguinte linha de c\xF3digo para liberar mem\xF3ria:`),So=m(),u(ja.$$.fragment),Oo=m(),Ee=o("p"),Ol=i("Em sequ\xEAncia, faremos um post-processing manual do "),it=o("code"),Fl=i("tokenized_dataset"),Nl=i(" e assim prepar\xE1-lo para o treinamento."),Fo=m(),H=o("ol"),wa=o("li"),ka=o("p"),Il=i("Apague a coluna de "),pt=o("code"),Ll=i("text"),Ml=i(" porque o modelo n\xE3o aceita texto cru como entrada:"),Hl=m(),u(Ea.$$.fragment),Rl=m(),ya=o("li"),X=o("p"),Bl=i("Troque o nome da coluna "),mt=o("code"),Kl=i("label"),Ul=i(" para "),ct=o("code"),Wl=i("labels"),Gl=i(", pois o modelo espera um argumento de mesmo nome:"),Yl=m(),u(qa.$$.fragment),Ql=m(),Aa=o("li"),dt=o("p"),Vl=i("Defina o formato do dataset para retornar tensores do PyTorch no lugar de listas:"),Jl=m(),u(Ta.$$.fragment),No=m(),rs=o("p"),Xl=i("Em sequ\xEAncia, crie um subconjunto menor do dataset, como foi mostrado anteriormente, para aceler\xE1-lo o fine-tuning."),Io=m(),u(Pa.$$.fragment),Lo=m(),Z=o("h3"),ye=o("a"),ut=o("span"),u(za.$$.fragment),Zl=m(),ft=o("span"),ei=i("DataLoader"),Mo=m(),qe=o("p"),ai=i("Crie um "),ht=o("code"),si=i("DataLoader"),ti=i(" para os seus datasets de treinamento e de teste para poder iterar sobre batches de dados:"),Ho=m(),u(Ca.$$.fragment),Ro=m(),ns=o("p"),oi=i("Carregue seu modelo com o n\xFAmero de labels esperados:"),Bo=m(),u(Da.$$.fragment),Ko=m(),ee=o("h3"),Ae=o("a"),gt=o("span"),u(xa.$$.fragment),ri=m(),_t=o("span"),ni=i("Otimiza\xE7\xE3o e configura\xE7\xE3o do Learning Rate"),Uo=m(),Te=o("p"),li=i(`Crie um otimizador e um learning rate para aplicar o fine-tuning ao modelo. 
Iremos utilizar o otimizador `),Sa=o("a"),vt=o("code"),ii=i("AdamW"),pi=i(" do PyTorch:"),Wo=m(),u(Oa.$$.fragment),Go=m(),Pe=o("p"),mi=i("Defina o learning rate do "),$t=o("code"),ci=i("Trainer"),di=i(":"),Yo=m(),u(Fa.$$.fragment),Qo=m(),ze=o("p"),ui=i("Por \xFAltimo, especifique o "),bt=o("code"),fi=i("device"),hi=i(` do ambiente para utilizar uma GPU se tiver acesso \xE0 alguma. Caso contr\xE1rio, o treinamento em uma CPU pode acabar levando v\xE1rias horas em vez de minutos.`),Vo=m(),u(Na.$$.fragment),Jo=m(),u(Ce.$$.fragment),Xo=m(),ls=o("p"),gi=i("Perfeito, agora estamos prontos para come\xE7ar o treinamento! \u{1F973}"),Zo=m(),ae=o("h3"),De=o("a"),jt=o("span"),u(Ia.$$.fragment),_i=m(),wt=o("span"),vi=i("Ciclo de treinamento"),er=m(),xe=o("p"),$i=i("Para visualizar melhor o processo de treinamento, utilize a biblioteca "),La=o("a"),bi=i("tqdm"),ji=i(` para adicionar uma barra de progresso sobre o n\xFAmero de passos percorridos no treinamento atual:`),ar=m(),u(Ma.$$.fragment),sr=m(),se=o("h3"),Se=o("a"),kt=o("span"),u(Ha.$$.fragment),wi=m(),Et=o("span"),ki=i("M\xE9tricas"),tr=m(),R=o("p"),Ei=i("Da mesma forma que \xE9 necess\xE1rio adicionar uma fun\xE7\xE3o de avalia\xE7\xE3o ao "),yt=o("code"),yi=i("Trainer"),qi=i(`, \xE9 necess\xE1rio fazer o mesmo quando escrevendo o pr\xF3prio ciclo de treinamento. 
Contudo, em vez de calcular e retornar a m\xE9trica final de cada \xE9poca, voc\xEA dever\xE1 adicionar todos os batches com `),Ra=o("a"),qt=o("code"),Ai=i("add_batch"),Ti=i(` e calcular a m\xE9trica apenas no final.`),or=m(),u(Ba.$$.fragment),rr=m(),is=o("a"),nr=m(),te=o("h2"),Oe=o("a"),At=o("span"),u(Ka.$$.fragment),Pi=m(),Tt=o("span"),zi=i("Recursos adicionais"),lr=m(),ps=o("p"),Ci=i("Para mais exemplos de fine-tuning acesse:"),ir=m(),Fe=o("ul"),Pt=o("li"),ms=o("p"),Ua=o("a"),Di=i("\u{1F917} Transformers Examples"),xi=i(` inclui scripts para treinas tarefas comuns de NLP em PyTorch e TensorFlow.`),Si=m(),zt=o("li"),cs=o("p"),ds=o("a"),Oi=i("\u{1F917} Transformers Notebooks"),Fi=i(` cont\xE9m v\xE1rios notebooks sobre como aplicar o fine-tuning a um modelo para tarefas espec\xEDficas no PyTorch e TensorFlow.`),this.h()},l(e){const t=Mm('[data-svelte="svelte-1phssyn"]',document.head);$=r(t,"META",{name:!0,content:!0}),t.forEach(a),y=c(e),b=r(e,"H1",{class:!0});var Wa=n(b);k=r(Wa,"A",{id:!0,class:!0,href:!0});var Ct=n(k);P=r(Ct,"SPAN",{});var Dt=n(P);f(E.$$.fragment,Dt),Dt.forEach(a),Ct.forEach(a),O=c(Wa),z=r(Wa,"SPAN",{});var Hi=n(z);A=p(Hi,"Fine-tuning de um modelo pr\xE9-treinado"),Hi.forEach(a),Wa.forEach(a),w=c(e),f(C.$$.fragment,e),F=c(e),Ga=r(e,"P",{});var Ri=n(Ga);Ur=p(Ri,`O uso de um modelo pr\xE9-treinado tem importantes vantagens. Redu\xE7\xE3o do custo computacional, a pegada de carbono, e te permite utilizar modelos de \xFAltima gera\xE7\xE3o sem ter que treinar um novo desde o in\xEDcio. O \u{1F917} Transformers proporciona acesso a milhares de modelos pr\xE9-treinados numa ampla gama de tarefas. Quando utilizar um modelo pr\xE9-treinado, treine-o com um dataset espec\xEDfico para a sua tarefa. Isto \xE9 chamado de fine-tuning, uma t\xE9cnica de treinamento incrivelmente poderosa. 
Neste tutorial faremos o fine-tuning de um modelo pr\xE9-treinado com um framework de Deep Learning da sua escolha:`),Ri.forEach(a),xt=c(e),N=r(e,"UL",{});var us=n(N);Le=r(us,"LI",{});var mr=n(Le);Wr=p(mr,"Fine-tuning de um modelo pr\xE9-treinado com o \u{1F917} Transformers "),bs=r(mr,"CODE",{});var Bi=n(bs);Gr=p(Bi,"Trainer"),Bi.forEach(a),Yr=p(mr,"."),mr.forEach(a),Qr=c(us),js=r(us,"LI",{});var Ki=n(js);Vr=p(Ki,"Fine-tuning de um modelo pr\xE9-treinado no TensorFlow com o Keras."),Ki.forEach(a),Jr=c(us),ws=r(us,"LI",{});var Ui=n(ws);Xr=p(Ui,"Fine-tuning de um modelo pr\xE9-treinado em PyTorch nativo."),Ui.forEach(a),us.forEach(a),St=c(e),Ya=r(e,"A",{id:!0}),n(Ya).forEach(a),Ot=c(e),B=r(e,"H2",{class:!0});var cr=n(B);re=r(cr,"A",{id:!0,class:!0,href:!0});var Wi=n(re);ks=r(Wi,"SPAN",{});var Gi=n(ks);f(Me.$$.fragment,Gi),Gi.forEach(a),Wi.forEach(a),Zr=c(cr),Es=r(cr,"SPAN",{});var Yi=n(Es);en=p(Yi,"Preparando um dataset"),Yi.forEach(a),cr.forEach(a),Ft=c(e),f(He.$$.fragment,e),Nt=c(e),Qa=r(e,"P",{});var Qi=n(Qa);an=p(Qi,`Antes de aplicar o fine-tuning a um modelo pr\xE9-treinado, baixe um dataset e prepare-o para o treinamento. O tutorial anterior ensinar\xE1 a processar os dados para o treinamento, e ent\xE3o poder\xE1 ter a oportunidade de testar esse novo conhecimento em algo pr\xE1tico.`),Qi.forEach(a),It=c(e),ne=r(e,"P",{});var dr=n(ne);sn=p(dr,"Comece carregando o dataset "),Re=r(dr,"A",{href:!0,rel:!0});var Vi=n(Re);tn=p(Vi,"Yelp Reviews"),Vi.forEach(a),on=p(dr,":"),dr.forEach(a),Lt=c(e),f(Be.$$.fragment,e),Mt=c(e),le=r(e,"P",{});var ur=n(le);rn=p(ur,`Como j\xE1 sabe, \xE9 necess\xE1rio ter um tokenizador para processar o texto e incluir uma estrat\xE9gia de padding e truncamento, para manejar qualquer tamanho var\xEDavel de sequ\xEAncia. 
Para processar o seu dataset em apenas um passo, utilize o m\xE9todo de \u{1F917} Datasets `),Ke=r(ur,"A",{href:!0,rel:!0});var Ji=n(Ke);ys=r(Ji,"CODE",{});var Xi=n(ys);nn=p(Xi,"map"),Xi.forEach(a),Ji.forEach(a),ln=p(ur,` para aplicar uma fun\xE7\xE3o de preprocessamento sobre todo o dataset.`),ur.forEach(a),Ht=c(e),f(Ue.$$.fragment,e),Rt=c(e),Va=r(e,"P",{});var Zi=n(Va);pn=p(Zi,"Se desejar, \xE9 poss\xEDvel criar um subconjunto menor do dataset completo para aplicar o fine-tuning e assim reduzir o tempo necess\xE1rio."),Zi.forEach(a),Bt=c(e),f(We.$$.fragment,e),Kt=c(e),Ja=r(e,"A",{id:!0}),n(Ja).forEach(a),Ut=c(e),K=r(e,"H2",{class:!0});var fr=n(K);ie=r(fr,"A",{id:!0,class:!0,href:!0});var ep=n(ie);qs=r(ep,"SPAN",{});var ap=n(qs);f(Ge.$$.fragment,ap),ap.forEach(a),ep.forEach(a),mn=c(fr),Xa=r(fr,"SPAN",{});var Ni=n(Xa);cn=p(Ni,"Fine-tuning com o "),As=r(Ni,"CODE",{});var sp=n(As);dn=p(sp,"Trainer"),sp.forEach(a),Ni.forEach(a),fr.forEach(a),Wt=c(e),f(Ye.$$.fragment,e),Gt=c(e),I=r(e,"P",{});var fs=n(I);un=p(fs,"O \u{1F917} Transformers proporciona uma classe "),Ts=r(fs,"CODE",{});var tp=n(Ts);fn=p(tp,"Trainer"),tp.forEach(a),hn=p(fs,` otimizada para o treinamento de modelos de \u{1F917} Transformers, facilitando os primeiros passos do treinamento sem a necessidade de escrever manualmente o seu pr\xF3prio ciclo. A API do `),Ps=r(fs,"CODE",{});var op=n(Ps);gn=p(op,"Trainer"),op.forEach(a),_n=p(fs,` suporta um grande conjunto de op\xE7\xF5es de treinamento e funcionalidades, como o logging, o gradient accumulation e o mixed precision.`),fs.forEach(a),Yt=c(e),pe=r(e,"P",{});var hr=n(pe);vn=p(hr,`Comece carregando seu modelo e especifique o n\xFAmero de labels de previs\xE3o. 
A partir do `),Qe=r(hr,"A",{href:!0,rel:!0});var rp=n(Qe);$n=p(rp,"Card Dataset"),rp.forEach(a),bn=p(hr,` do Yelp Reveiw, que ja sabemos ter 5 labels usamos o seguinte c\xF3digo:`),hr.forEach(a),Qt=c(e),f(Ve.$$.fragment,e),Vt=c(e),f(me.$$.fragment,e),Jt=c(e),U=r(e,"H3",{class:!0});var gr=n(U);ce=r(gr,"A",{id:!0,class:!0,href:!0});var np=n(ce);zs=r(np,"SPAN",{});var lp=n(zs);f(Je.$$.fragment,lp),lp.forEach(a),np.forEach(a),jn=c(gr),Cs=r(gr,"SPAN",{});var ip=n(Cs);wn=p(ip,"Hiperpar\xE2metros de treinamento"),ip.forEach(a),gr.forEach(a),Xt=c(e),L=r(e,"P",{});var hs=n(L);kn=p(hs,"Em seguida, crie uma classe "),Ds=r(hs,"CODE",{});var pp=n(Ds);En=p(pp,"TrainingArguments"),pp.forEach(a),yn=p(hs,` que contenha todos os hiperpar\xE2metros que possam ser ajustados, assim como os indicadores para ativar as diferentes op\xE7\xF5es de treinamento. Para este tutorial, voc\xEA pode come\xE7ar o treinamento usando os `),Xe=r(hs,"A",{href:!0,rel:!0});var mp=n(Xe);qn=p(mp,"hiperpar\xE1metros"),mp.forEach(a),An=p(hs,` padr\xE3o, por\xE9m, sinta-se livre para experimentar com eles e encontrar uma configura\xE7\xE3o \xF3tima.`),hs.forEach(a),Zt=c(e),Za=r(e,"P",{});var cp=n(Za);Tn=p(cp,"Especifique onde salvar os checkpoints do treinamento:"),cp.forEach(a),eo=c(e),f(Ze.$$.fragment,e),ao=c(e),W=r(e,"H3",{class:!0});var _r=n(W);de=r(_r,"A",{id:!0,class:!0,href:!0});var dp=n(de);xs=r(dp,"SPAN",{});var up=n(xs);f(ea.$$.fragment,up),up.forEach(a),dp.forEach(a),Pn=c(_r),Ss=r(_r,"SPAN",{});var fp=n(Ss);zn=p(fp,"M\xE9tricas"),fp.forEach(a),_r.forEach(a),so=c(e),q=r(e,"P",{});var S=n(q);Cn=p(S,"O "),Os=r(S,"CODE",{});var hp=n(Os);Dn=p(hp,"Trainer"),hp.forEach(a),xn=p(S,` n\xE3o avalia automaticamente o rendimento do modelo durante o treinamento. Ser\xE1 necess\xE1rio passar ao `),Fs=r(S,"CODE",{});var gp=n(Fs);Sn=p(gp,"Trainer"),gp.forEach(a),On=p(S,` uma fun\xE7\xE3o para calcular e fazer um diagn\xF3stico sobre as m\xE9tricas. 
A biblioteca \u{1F917} Datasets proporciona uma fun\xE7\xE3o de `),aa=r(S,"A",{href:!0,rel:!0});var _p=n(aa);Ns=r(_p,"CODE",{});var vp=n(Ns);Fn=p(vp,"accuracy"),vp.forEach(a),_p.forEach(a),Nn=p(S,` simples que pode ser carregada com a fun\xE7\xE3o `),Is=r(S,"CODE",{});var $p=n(Is);In=p($p,"load_metric"),$p.forEach(a),Ln=p(S," (ver este "),sa=r(S,"A",{href:!0,rel:!0});var bp=n(sa);Mn=p(bp,"tutorial"),bp.forEach(a),Hn=p(S," para mais informa\xE7\xF5es):"),S.forEach(a),to=c(e),f(ta.$$.fragment,e),oo=c(e),D=r(e,"P",{});var Ne=n(D);Rn=p(Ne,"Defina a fun\xE7\xE3o "),Ls=r(Ne,"CODE",{});var jp=n(Ls);Bn=p(jp,"compute"),jp.forEach(a),Kn=p(Ne," dentro de "),Ms=r(Ne,"CODE",{});var wp=n(Ms);Un=p(wp,"metric"),wp.forEach(a),Wn=p(Ne,` para calcular a precis\xE3o das suas predi\xE7\xF5es. Antes de passar as suas predi\xE7\xF5es ao `),Hs=r(Ne,"CODE",{});var kp=n(Hs);Gn=p(kp,"compute"),kp.forEach(a),Yn=p(Ne,`, \xE9 necess\xE1rio converter as predi\xE7\xF5es \xE0 logits (lembre-se que todos os modelos de \u{1F917} Transformers retornam logits).`),Ne.forEach(a),ro=c(e),f(oa.$$.fragment,e),no=c(e),ue=r(e,"P",{});var vr=n(ue);Qn=p(vr,"Se quiser controlar as suas m\xE9tricas de avalia\xE7\xE3o durante o fine-tuning, especifique o par\xE2metro "),Rs=r(vr,"CODE",{});var Ep=n(Rs);Vn=p(Ep,"evaluation_strategy"),Ep.forEach(a),Jn=p(vr,` nos seus argumentos de treinamento para que o modelo considere a m\xE9trica de avalia\xE7\xE3o ao final de cada \xE9poca:`),vr.forEach(a),lo=c(e),f(ra.$$.fragment,e),io=c(e),G=r(e,"H3",{class:!0});var $r=n(G);fe=r($r,"A",{id:!0,class:!0,href:!0});var yp=n(fe);Bs=r(yp,"SPAN",{});var qp=n(Bs);f(na.$$.fragment,qp),qp.forEach(a),yp.forEach(a),Xn=c($r),Ks=r($r,"SPAN",{});var Ap=n(Ks);Zn=p(Ap,"Trainer"),Ap.forEach(a),$r.forEach(a),po=c(e),he=r(e,"P",{});var br=n(he);el=p(br,"Crie um objeto "),Us=r(br,"CODE",{});var Tp=n(Us);al=p(Tp,"Trainer"),Tp.forEach(a),sl=p(br," com o seu modelo, argumentos de treinamento, conjuntos de dados de treinamento e de teste, e a sua 
fun\xE7\xE3o de avalia\xE7\xE3o:"),br.forEach(a),mo=c(e),f(la.$$.fragment,e),co=c(e),ge=r(e,"P",{});var jr=n(ge);tl=p(jr,"Em seguida, aplique o fine-tuning a seu modelo chamado "),Ws=r(jr,"CODE",{});var Pp=n(Ws);ol=p(Pp,"train()"),Pp.forEach(a),rl=p(jr,":"),jr.forEach(a),uo=c(e),f(ia.$$.fragment,e),fo=c(e),es=r(e,"A",{id:!0}),n(es).forEach(a),ho=c(e),Y=r(e,"H2",{class:!0});var wr=n(Y);_e=r(wr,"A",{id:!0,class:!0,href:!0});var zp=n(_e);Gs=r(zp,"SPAN",{});var Cp=n(Gs);f(pa.$$.fragment,Cp),Cp.forEach(a),zp.forEach(a),nl=c(wr),Ys=r(wr,"SPAN",{});var Dp=n(Ys);ll=p(Dp,"Fine-tuning com Keras"),Dp.forEach(a),wr.forEach(a),go=c(e),f(ma.$$.fragment,e),_o=c(e),as=r(e,"P",{});var xp=n(as);il=p(xp,`Os modelos de \u{1F917} Transformers tamb\xE9m permitem realizar o treinamento com o TensorFlow com a API do Keras. Contudo, ser\xE1 necess\xE1rio fazer algumas mudan\xE7as antes de realizar o fine-tuning.`),xp.forEach(a),vo=c(e),Q=r(e,"H3",{class:!0});var kr=n(Q);ve=r(kr,"A",{id:!0,class:!0,href:!0});var Sp=n(ve);Qs=r(Sp,"SPAN",{});var Op=n(Qs);f(ca.$$.fragment,Op),Op.forEach(a),Sp.forEach(a),pl=c(kr),Vs=r(kr,"SPAN",{});var Fp=n(Vs);ml=p(Fp,"Convers\xE3o do dataset ao formato do TensorFlow"),Fp.forEach(a),kr.forEach(a),$o=c(e),M=r(e,"P",{});var gs=n(M);cl=p(gs,"O "),Js=r(gs,"CODE",{});var Np=n(Js);dl=p(Np,"DefaultDataCollator"),Np.forEach(a),ul=p(gs,` junta os tensores em um batch para que o modelo possa ser treinado em cima deles. Assegure-se de especificar os `),Xs=r(gs,"CODE",{});var Ip=n(Xs);fl=p(Ip,"return_tensors"),Ip.forEach(a),hl=p(gs," para retornar os tensores do TensorFlow:"),gs.forEach(a),bo=c(e),f(da.$$.fragment,e),jo=c(e),f($e.$$.fragment,e),wo=c(e),x=r(e,"P",{});var Ie=n(x);gl=p(Ie,`Em seguida, converta os datasets tokenizados em datasets do TensorFlow com o m\xE9todo `),ua=r(Ie,"A",{href:!0,rel:!0});var Lp=n(ua);Zs=r(Lp,"CODE",{});var Mp=n(Zs);_l=p(Mp,"to_tf_dataset"),Mp.forEach(a),Lp.forEach(a),vl=p(Ie,`. 
Especifique suas entradas em `),et=r(Ie,"CODE",{});var Hp=n(et);$l=p(Hp,"columns"),Hp.forEach(a),bl=p(Ie," e seu r\xF3tulo em "),at=r(Ie,"CODE",{});var Rp=n(at);jl=p(Rp,"label_cols"),Rp.forEach(a),wl=p(Ie,":"),Ie.forEach(a),ko=c(e),f(fa.$$.fragment,e),Eo=c(e),V=r(e,"H3",{class:!0});var Er=n(V);be=r(Er,"A",{id:!0,class:!0,href:!0});var Bp=n(be);st=r(Bp,"SPAN",{});var Kp=n(st);f(ha.$$.fragment,Kp),Kp.forEach(a),Bp.forEach(a),kl=c(Er),tt=r(Er,"SPAN",{});var Up=n(tt);El=p(Up,"Compila\xE7\xE3o e ajustes"),Up.forEach(a),Er.forEach(a),yo=c(e),ss=r(e,"P",{});var Wp=n(ss);yl=p(Wp,"Carregue um modelo do TensorFlow com o n\xFAmero esperado de r\xF3tulos:"),Wp.forEach(a),qo=c(e),f(ga.$$.fragment,e),Ao=c(e),je=r(e,"P",{});var yr=n(je);ql=p(yr,"A seguir, compile e ajuste o fine-tuning a seu modelo com "),_a=r(yr,"A",{href:!0,rel:!0});var Gp=n(_a);ot=r(Gp,"CODE",{});var Yp=n(ot);Al=p(Yp,"fit"),Yp.forEach(a),Gp.forEach(a),Tl=p(yr,` como faria com qualquer outro modelo do Keras:`),yr.forEach(a),To=c(e),f(va.$$.fragment,e),Po=c(e),ts=r(e,"A",{id:!0}),n(ts).forEach(a),zo=c(e),J=r(e,"H2",{class:!0});var qr=n(J);we=r(qr,"A",{id:!0,class:!0,href:!0});var Qp=n(we);rt=r(Qp,"SPAN",{});var Vp=n(rt);f($a.$$.fragment,Vp),Vp.forEach(a),Qp.forEach(a),Pl=c(qr),nt=r(qr,"SPAN",{});var Jp=n(nt);zl=p(Jp,"Fine-tune em PyTorch nativo"),Jp.forEach(a),qr.forEach(a),Co=c(e),f(ba.$$.fragment,e),Do=c(e),ke=r(e,"P",{});var Ar=n(ke);Cl=p(Ar,"O "),lt=r(Ar,"CODE",{});var Xp=n(lt);Dl=p(Xp,"Trainer"),Xp.forEach(a),xl=p(Ar,` se encarrega do ciclo de treinamento e permite aplicar o fine-tuning a um modelo em uma linha de c\xF3digo apenas. 
Para os usu\xE1rios que preferirem escrever o seu pr\xF3prio ciclo de treinamento, tamb\xE9m \xE9 poss\xEDvel aplicar o fine-tuning a um modelo de \u{1F917} Transformers em PyTorch nativo.`),Ar.forEach(a),xo=c(e),os=r(e,"P",{});var Zp=n(os);Sl=p(Zp,`Neste momento, talvez ocorra a necessidade de reinicar seu notebook ou executar a seguinte linha de c\xF3digo para liberar mem\xF3ria:`),Zp.forEach(a),So=c(e),f(ja.$$.fragment,e),Oo=c(e),Ee=r(e,"P",{});var Tr=n(Ee);Ol=p(Tr,"Em sequ\xEAncia, faremos um post-processing manual do "),it=r(Tr,"CODE",{});var em=n(it);Fl=p(em,"tokenized_dataset"),em.forEach(a),Nl=p(Tr," e assim prepar\xE1-lo para o treinamento."),Tr.forEach(a),Fo=c(e),H=r(e,"OL",{});var _s=n(H);wa=r(_s,"LI",{});var Pr=n(wa);ka=r(Pr,"P",{});var zr=n(ka);Il=p(zr,"Apague a coluna de "),pt=r(zr,"CODE",{});var am=n(pt);Ll=p(am,"text"),am.forEach(a),Ml=p(zr," porque o modelo n\xE3o aceita texto cru como entrada:"),zr.forEach(a),Hl=c(Pr),f(Ea.$$.fragment,Pr),Pr.forEach(a),Rl=c(_s),ya=r(_s,"LI",{});var Cr=n(ya);X=r(Cr,"P",{});var vs=n(X);Bl=p(vs,"Troque o nome da coluna "),mt=r(vs,"CODE",{});var sm=n(mt);Kl=p(sm,"label"),sm.forEach(a),Ul=p(vs," para "),ct=r(vs,"CODE",{});var tm=n(ct);Wl=p(tm,"labels"),tm.forEach(a),Gl=p(vs,", pois o modelo espera um argumento de mesmo nome:"),vs.forEach(a),Yl=c(Cr),f(qa.$$.fragment,Cr),Cr.forEach(a),Ql=c(_s),Aa=r(_s,"LI",{});var Dr=n(Aa);dt=r(Dr,"P",{});var om=n(dt);Vl=p(om,"Defina o formato do dataset para retornar tensores do PyTorch no lugar de listas:"),om.forEach(a),Jl=c(Dr),f(Ta.$$.fragment,Dr),Dr.forEach(a),_s.forEach(a),No=c(e),rs=r(e,"P",{});var rm=n(rs);Xl=p(rm,"Em sequ\xEAncia, crie um subconjunto menor do dataset, como foi mostrado anteriormente, para aceler\xE1-lo o fine-tuning."),rm.forEach(a),Io=c(e),f(Pa.$$.fragment,e),Lo=c(e),Z=r(e,"H3",{class:!0});var xr=n(Z);ye=r(xr,"A",{id:!0,class:!0,href:!0});var nm=n(ye);ut=r(nm,"SPAN",{});var 
lm=n(ut);f(za.$$.fragment,lm),lm.forEach(a),nm.forEach(a),Zl=c(xr),ft=r(xr,"SPAN",{});var im=n(ft);ei=p(im,"DataLoader"),im.forEach(a),xr.forEach(a),Mo=c(e),qe=r(e,"P",{});var Sr=n(qe);ai=p(Sr,"Crie um "),ht=r(Sr,"CODE",{});var pm=n(ht);si=p(pm,"DataLoader"),pm.forEach(a),ti=p(Sr," para os seus datasets de treinamento e de teste para poder iterar sobre batches de dados:"),Sr.forEach(a),Ho=c(e),f(Ca.$$.fragment,e),Ro=c(e),ns=r(e,"P",{});var mm=n(ns);oi=p(mm,"Carregue seu modelo com o n\xFAmero de labels esperados:"),mm.forEach(a),Bo=c(e),f(Da.$$.fragment,e),Ko=c(e),ee=r(e,"H3",{class:!0});var Or=n(ee);Ae=r(Or,"A",{id:!0,class:!0,href:!0});var cm=n(Ae);gt=r(cm,"SPAN",{});var dm=n(gt);f(xa.$$.fragment,dm),dm.forEach(a),cm.forEach(a),ri=c(Or),_t=r(Or,"SPAN",{});var um=n(_t);ni=p(um,"Otimiza\xE7\xE3o e configura\xE7\xE3o do Learning Rate"),um.forEach(a),Or.forEach(a),Uo=c(e),Te=r(e,"P",{});var Fr=n(Te);li=p(Fr,`Crie um otimizador e um learning rate para aplicar o fine-tuning ao modelo. Iremos utilizar o otimizador `),Sa=r(Fr,"A",{href:!0,rel:!0});var fm=n(Sa);vt=r(fm,"CODE",{});var hm=n(vt);ii=p(hm,"AdamW"),hm.forEach(a),fm.forEach(a),pi=p(Fr," do PyTorch:"),Fr.forEach(a),Wo=c(e),f(Oa.$$.fragment,e),Go=c(e),Pe=r(e,"P",{});var Nr=n(Pe);mi=p(Nr,"Defina o learning rate do "),$t=r(Nr,"CODE",{});var gm=n($t);ci=p(gm,"Trainer"),gm.forEach(a),di=p(Nr,":"),Nr.forEach(a),Yo=c(e),f(Fa.$$.fragment,e),Qo=c(e),ze=r(e,"P",{});var Ir=n(ze);ui=p(Ir,"Por \xFAltimo, especifique o "),bt=r(Ir,"CODE",{});var _m=n(bt);fi=p(_m,"device"),_m.forEach(a),hi=p(Ir,` do ambiente para utilizar uma GPU se tiver acesso \xE0 alguma. Caso contr\xE1rio, o treinamento em uma CPU pode acabar levando v\xE1rias horas em vez de minutos.`),Ir.forEach(a),Vo=c(e),f(Na.$$.fragment,e),Jo=c(e),f(Ce.$$.fragment,e),Xo=c(e),ls=r(e,"P",{});var vm=n(ls);gi=p(vm,"Perfeito, agora estamos prontos para come\xE7ar o treinamento! 
\u{1F973}"),vm.forEach(a),Zo=c(e),ae=r(e,"H3",{class:!0});var Lr=n(ae);De=r(Lr,"A",{id:!0,class:!0,href:!0});var $m=n(De);jt=r($m,"SPAN",{});var bm=n(jt);f(Ia.$$.fragment,bm),bm.forEach(a),$m.forEach(a),_i=c(Lr),wt=r(Lr,"SPAN",{});var jm=n(wt);vi=p(jm,"Ciclo de treinamento"),jm.forEach(a),Lr.forEach(a),er=c(e),xe=r(e,"P",{});var Mr=n(xe);$i=p(Mr,"Para visualizar melhor o processo de treinamento, utilize a biblioteca "),La=r(Mr,"A",{href:!0,rel:!0});var wm=n(La);bi=p(wm,"tqdm"),wm.forEach(a),ji=p(Mr,` para adicionar uma barra de progresso sobre o n\xFAmero de passos percorridos no treinamento atual:`),Mr.forEach(a),ar=c(e),f(Ma.$$.fragment,e),sr=c(e),se=r(e,"H3",{class:!0});var Hr=n(se);Se=r(Hr,"A",{id:!0,class:!0,href:!0});var km=n(Se);kt=r(km,"SPAN",{});var Em=n(kt);f(Ha.$$.fragment,Em),Em.forEach(a),km.forEach(a),wi=c(Hr),Et=r(Hr,"SPAN",{});var ym=n(Et);ki=p(ym,"M\xE9tricas"),ym.forEach(a),Hr.forEach(a),tr=c(e),R=r(e,"P",{});var $s=n(R);Ei=p($s,"Da mesma forma que \xE9 necess\xE1rio adicionar uma fun\xE7\xE3o de avalia\xE7\xE3o ao "),yt=r($s,"CODE",{});var qm=n(yt);yi=p(qm,"Trainer"),qm.forEach(a),qi=p($s,`, \xE9 necess\xE1rio fazer o mesmo quando escrevendo o pr\xF3prio ciclo de treinamento. 
Contudo, em vez de calcular e retornar a m\xE9trica final de cada \xE9poca, voc\xEA dever\xE1 adicionar todos os batches com `),Ra=r($s,"A",{href:!0,rel:!0});var Am=n(Ra);qt=r(Am,"CODE",{});var Tm=n(qt);Ai=p(Tm,"add_batch"),Tm.forEach(a),Am.forEach(a),Ti=p($s,` e calcular a m\xE9trica apenas no final.`),$s.forEach(a),or=c(e),f(Ba.$$.fragment,e),rr=c(e),is=r(e,"A",{id:!0}),n(is).forEach(a),nr=c(e),te=r(e,"H2",{class:!0});var Rr=n(te);Oe=r(Rr,"A",{id:!0,class:!0,href:!0});var Pm=n(Oe);At=r(Pm,"SPAN",{});var zm=n(At);f(Ka.$$.fragment,zm),zm.forEach(a),Pm.forEach(a),Pi=c(Rr),Tt=r(Rr,"SPAN",{});var Cm=n(Tt);zi=p(Cm,"Recursos adicionais"),Cm.forEach(a),Rr.forEach(a),lr=c(e),ps=r(e,"P",{});var Dm=n(ps);Ci=p(Dm,"Para mais exemplos de fine-tuning acesse:"),Dm.forEach(a),ir=c(e),Fe=r(e,"UL",{});var Br=n(Fe);Pt=r(Br,"LI",{});var xm=n(Pt);ms=r(xm,"P",{});var Ii=n(ms);Ua=r(Ii,"A",{href:!0,rel:!0});var Sm=n(Ua);Di=p(Sm,"\u{1F917} Transformers Examples"),Sm.forEach(a),xi=p(Ii,` inclui scripts para treinas tarefas comuns de NLP em PyTorch e TensorFlow.`),Ii.forEach(a),xm.forEach(a),Si=c(Br),zt=r(Br,"LI",{});var Om=n(zt);cs=r(Om,"P",{});var Li=n(cs);ds=r(Li,"A",{href:!0});var Fm=n(ds);Oi=p(Fm,"\u{1F917} Transformers Notebooks"),Fm.forEach(a),Fi=p(Li,` cont\xE9m v\xE1rios notebooks sobre como aplicar o fine-tuning a um modelo para tarefas espec\xEDficas no PyTorch e TensorFlow.`),Li.forEach(a),Om.forEach(a),Br.forEach(a),this.h()},h(){d($,"name","hf:doc:metadata"),d($,"content",JSON.stringify(Gm)),d(k,"id","finetuning-de-um-modelo-prtreinado"),d(k,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(k,"href","#finetuning-de-um-modelo-prtreinado"),d(b,"class","relative group"),d(Ya,"id","data-processing"),d(re,"id","preparando-um-dataset"),d(re,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),d(re,"href","#preparando-um-dataset"),d(B,"class","relative group"),d(Re,"href","https://huggingface.co/datasets/yelp_review_full"),d(Re,"rel","nofollow"),d(Ke,"href","https://huggingface.co/docs/datasets/process.html#map"),d(Ke,"rel","nofollow"),d(Ja,"id","trainer"),d(ie,"id","finetuning-com-o-trainer"),d(ie,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ie,"href","#finetuning-com-o-trainer"),d(K,"class","relative group"),d(Qe,"href","https://huggingface.co/datasets/yelp_review_full#data-fields"),d(Qe,"rel","nofollow"),d(ce,"id","hiperparmetros-de-treinamento"),d(ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ce,"href","#hiperparmetros-de-treinamento"),d(U,"class","relative group"),d(Xe,"href","https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments"),d(Xe,"rel","nofollow"),d(de,"id","mtricas"),d(de,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(de,"href","#mtricas"),d(W,"class","relative group"),d(aa,"href","https://huggingface.co/metrics/accuracy"),d(aa,"rel","nofollow"),d(sa,"href","https://huggingface.co/docs/datasets/metrics.html"),d(sa,"rel","nofollow"),d(fe,"id","trainer"),d(fe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(fe,"href","#trainer"),d(G,"class","relative group"),d(es,"id","keras"),d(_e,"id","finetuning-com-keras"),d(_e,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full"),d(_e,"href","#finetuning-com-keras"),d(Y,"class","relative group"),d(ve,"id","converso-do-dataset-ao-formato-do-tensorflow"),d(ve,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ve,"href","#converso-do-dataset-ao-formato-do-tensorflow"),d(Q,"class","relative group"),d(ua,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),d(ua,"rel","nofollow"),d(be,"id","compilao-e-ajustes"),d(be,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(be,"href","#compilao-e-ajustes"),d(V,"class","relative group"),d(_a,"href","https://keras.io/api/models/model_training_apis/"),d(_a,"rel","nofollow"),d(ts,"id","pytorch_native"),d(we,"id","finetune-em-pytorch-nativo"),d(we,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(we,"href","#finetune-em-pytorch-nativo"),d(J,"class","relative group"),d(ye,"id","dataloader"),d(ye,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(ye,"href","#dataloader"),d(Z,"class","relative group"),d(Ae,"id","otimizao-e-configurao-do-learning-rate"),d(Ae,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Ae,"href","#otimizao-e-configurao-do-learning-rate"),d(ee,"class","relative 
group"),d(Sa,"href","https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html"),d(Sa,"rel","nofollow"),d(De,"id","ciclo-de-treinamento"),d(De,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(De,"href","#ciclo-de-treinamento"),d(ae,"class","relative group"),d(La,"href","https://tqdm.github.io/"),d(La,"rel","nofollow"),d(Se,"id","mtricas"),d(Se,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Se,"href","#mtricas"),d(se,"class","relative group"),d(Ra,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=add_batch#datasets.Metric.add_batch"),d(Ra,"rel","nofollow"),d(is,"id","additional-resources"),d(Oe,"id","recursos-adicionais"),d(Oe,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),d(Oe,"href","#recursos-adicionais"),d(te,"class","relative 
group"),d(Ua,"href","https://github.com/huggingface/transformers/tree/main/examples"),d(Ua,"rel","nofollow"),d(ds,"href","notebooks")},m(e,t){s(document.head,$),l(e,y,t),l(e,b,t),s(b,k),s(k,P),h(E,P,null),s(b,O),s(b,z),s(z,A),l(e,w,t),h(C,e,t),l(e,F,t),l(e,Ga,t),s(Ga,Ur),l(e,xt,t),l(e,N,t),s(N,Le),s(Le,Wr),s(Le,bs),s(bs,Gr),s(Le,Yr),s(N,Qr),s(N,js),s(js,Vr),s(N,Jr),s(N,ws),s(ws,Xr),l(e,St,t),l(e,Ya,t),l(e,Ot,t),l(e,B,t),s(B,re),s(re,ks),h(Me,ks,null),s(B,Zr),s(B,Es),s(Es,en),l(e,Ft,t),h(He,e,t),l(e,Nt,t),l(e,Qa,t),s(Qa,an),l(e,It,t),l(e,ne,t),s(ne,sn),s(ne,Re),s(Re,tn),s(ne,on),l(e,Lt,t),h(Be,e,t),l(e,Mt,t),l(e,le,t),s(le,rn),s(le,Ke),s(Ke,ys),s(ys,nn),s(le,ln),l(e,Ht,t),h(Ue,e,t),l(e,Rt,t),l(e,Va,t),s(Va,pn),l(e,Bt,t),h(We,e,t),l(e,Kt,t),l(e,Ja,t),l(e,Ut,t),l(e,K,t),s(K,ie),s(ie,qs),h(Ge,qs,null),s(K,mn),s(K,Xa),s(Xa,cn),s(Xa,As),s(As,dn),l(e,Wt,t),h(Ye,e,t),l(e,Gt,t),l(e,I,t),s(I,un),s(I,Ts),s(Ts,fn),s(I,hn),s(I,Ps),s(Ps,gn),s(I,_n),l(e,Yt,t),l(e,pe,t),s(pe,vn),s(pe,Qe),s(Qe,$n),s(pe,bn),l(e,Qt,t),h(Ve,e,t),l(e,Vt,t),h(me,e,t),l(e,Jt,t),l(e,U,t),s(U,ce),s(ce,zs),h(Je,zs,null),s(U,jn),s(U,Cs),s(Cs,wn),l(e,Xt,t),l(e,L,t),s(L,kn),s(L,Ds),s(Ds,En),s(L,yn),s(L,Xe),s(Xe,qn),s(L,An),l(e,Zt,t),l(e,Za,t),s(Za,Tn),l(e,eo,t),h(Ze,e,t),l(e,ao,t),l(e,W,t),s(W,de),s(de,xs),h(ea,xs,null),s(W,Pn),s(W,Ss),s(Ss,zn),l(e,so,t),l(e,q,t),s(q,Cn),s(q,Os),s(Os,Dn),s(q,xn),s(q,Fs),s(Fs,Sn),s(q,On),s(q,aa),s(aa,Ns),s(Ns,Fn),s(q,Nn),s(q,Is),s(Is,In),s(q,Ln),s(q,sa),s(sa,Mn),s(q,Hn),l(e,to,t),h(ta,e,t),l(e,oo,t),l(e,D,t),s(D,Rn),s(D,Ls),s(Ls,Bn),s(D,Kn),s(D,Ms),s(Ms,Un),s(D,Wn),s(D,Hs),s(Hs,Gn),s(D,Yn),l(e,ro,t),h(oa,e,t),l(e,no,t),l(e,ue,t),s(ue,Qn),s(ue,Rs),s(Rs,Vn),s(ue,Jn),l(e,lo,t),h(ra,e,t),l(e,io,t),l(e,G,t),s(G,fe),s(fe,Bs),h(na,Bs,null),s(G,Xn),s(G,Ks),s(Ks,Zn),l(e,po,t),l(e,he,t),s(he,el),s(he,Us),s(Us,al),s(he,sl),l(e,mo,t),h(la,e,t),l(e,co,t),l(e,ge,t),s(ge,tl),s(ge,Ws),s(Ws,ol),s(ge,rl),l(e,uo,t),h(ia,e,t),l(e,fo,t),l(e,es,t),l(e,ho,t),l(e,Y,t),s(Y,_e),s(_e,Gs),h(pa,Gs,null),s(Y
,nl),s(Y,Ys),s(Ys,ll),l(e,go,t),h(ma,e,t),l(e,_o,t),l(e,as,t),s(as,il),l(e,vo,t),l(e,Q,t),s(Q,ve),s(ve,Qs),h(ca,Qs,null),s(Q,pl),s(Q,Vs),s(Vs,ml),l(e,$o,t),l(e,M,t),s(M,cl),s(M,Js),s(Js,dl),s(M,ul),s(M,Xs),s(Xs,fl),s(M,hl),l(e,bo,t),h(da,e,t),l(e,jo,t),h($e,e,t),l(e,wo,t),l(e,x,t),s(x,gl),s(x,ua),s(ua,Zs),s(Zs,_l),s(x,vl),s(x,et),s(et,$l),s(x,bl),s(x,at),s(at,jl),s(x,wl),l(e,ko,t),h(fa,e,t),l(e,Eo,t),l(e,V,t),s(V,be),s(be,st),h(ha,st,null),s(V,kl),s(V,tt),s(tt,El),l(e,yo,t),l(e,ss,t),s(ss,yl),l(e,qo,t),h(ga,e,t),l(e,Ao,t),l(e,je,t),s(je,ql),s(je,_a),s(_a,ot),s(ot,Al),s(je,Tl),l(e,To,t),h(va,e,t),l(e,Po,t),l(e,ts,t),l(e,zo,t),l(e,J,t),s(J,we),s(we,rt),h($a,rt,null),s(J,Pl),s(J,nt),s(nt,zl),l(e,Co,t),h(ba,e,t),l(e,Do,t),l(e,ke,t),s(ke,Cl),s(ke,lt),s(lt,Dl),s(ke,xl),l(e,xo,t),l(e,os,t),s(os,Sl),l(e,So,t),h(ja,e,t),l(e,Oo,t),l(e,Ee,t),s(Ee,Ol),s(Ee,it),s(it,Fl),s(Ee,Nl),l(e,Fo,t),l(e,H,t),s(H,wa),s(wa,ka),s(ka,Il),s(ka,pt),s(pt,Ll),s(ka,Ml),s(wa,Hl),h(Ea,wa,null),s(H,Rl),s(H,ya),s(ya,X),s(X,Bl),s(X,mt),s(mt,Kl),s(X,Ul),s(X,ct),s(ct,Wl),s(X,Gl),s(ya,Yl),h(qa,ya,null),s(H,Ql),s(H,Aa),s(Aa,dt),s(dt,Vl),s(Aa,Jl),h(Ta,Aa,null),l(e,No,t),l(e,rs,t),s(rs,Xl),l(e,Io,t),h(Pa,e,t),l(e,Lo,t),l(e,Z,t),s(Z,ye),s(ye,ut),h(za,ut,null),s(Z,Zl),s(Z,ft),s(ft,ei),l(e,Mo,t),l(e,qe,t),s(qe,ai),s(qe,ht),s(ht,si),s(qe,ti),l(e,Ho,t),h(Ca,e,t),l(e,Ro,t),l(e,ns,t),s(ns,oi),l(e,Bo,t),h(Da,e,t),l(e,Ko,t),l(e,ee,t),s(ee,Ae),s(Ae,gt),h(xa,gt,null),s(ee,ri),s(ee,_t),s(_t,ni),l(e,Uo,t),l(e,Te,t),s(Te,li),s(Te,Sa),s(Sa,vt),s(vt,ii),s(Te,pi),l(e,Wo,t),h(Oa,e,t),l(e,Go,t),l(e,Pe,t),s(Pe,mi),s(Pe,$t),s($t,ci),s(Pe,di),l(e,Yo,t),h(Fa,e,t),l(e,Qo,t),l(e,ze,t),s(ze,ui),s(ze,bt),s(bt,fi),s(ze,hi),l(e,Vo,t),h(Na,e,t),l(e,Jo,t),h(Ce,e,t),l(e,Xo,t),l(e,ls,t),s(ls,gi),l(e,Zo,t),l(e,ae,t),s(ae,De),s(De,jt),h(Ia,jt,null),s(ae,_i),s(ae,wt),s(wt,vi),l(e,er,t),l(e,xe,t),s(xe,$i),s(xe,La),s(La,bi),s(xe,ji),l(e,ar,t),h(Ma,e,t),l(e,sr,t),l(e,se,t),s(se,Se),s(Se,kt),h(Ha,kt,null),s(se,wi),s(se,Et),s(Et,ki),l(e,tr,t),l(e,R,
t),s(R,Ei),s(R,yt),s(yt,yi),s(R,qi),s(R,Ra),s(Ra,qt),s(qt,Ai),s(R,Ti),l(e,or,t),h(Ba,e,t),l(e,rr,t),l(e,is,t),l(e,nr,t),l(e,te,t),s(te,Oe),s(Oe,At),h(Ka,At,null),s(te,Pi),s(te,Tt),s(Tt,zi),l(e,lr,t),l(e,ps,t),s(ps,Ci),l(e,ir,t),l(e,Fe,t),s(Fe,Pt),s(Pt,ms),s(ms,Ua),s(Ua,Di),s(ms,xi),s(Fe,Si),s(Fe,zt),s(zt,cs),s(cs,ds),s(ds,Oi),s(cs,Fi),pr=!0},p(e,[t]){const Wa={};t&2&&(Wa.$$scope={dirty:t,ctx:e}),me.$set(Wa);const Ct={};t&2&&(Ct.$$scope={dirty:t,ctx:e}),$e.$set(Ct);const Dt={};t&2&&(Dt.$$scope={dirty:t,ctx:e}),Ce.$set(Dt)},i(e){pr||(g(E.$$.fragment,e),g(C.$$.fragment,e),g(Me.$$.fragment,e),g(He.$$.fragment,e),g(Be.$$.fragment,e),g(Ue.$$.fragment,e),g(We.$$.fragment,e),g(Ge.$$.fragment,e),g(Ye.$$.fragment,e),g(Ve.$$.fragment,e),g(me.$$.fragment,e),g(Je.$$.fragment,e),g(Ze.$$.fragment,e),g(ea.$$.fragment,e),g(ta.$$.fragment,e),g(oa.$$.fragment,e),g(ra.$$.fragment,e),g(na.$$.fragment,e),g(la.$$.fragment,e),g(ia.$$.fragment,e),g(pa.$$.fragment,e),g(ma.$$.fragment,e),g(ca.$$.fragment,e),g(da.$$.fragment,e),g($e.$$.fragment,e),g(fa.$$.fragment,e),g(ha.$$.fragment,e),g(ga.$$.fragment,e),g(va.$$.fragment,e),g($a.$$.fragment,e),g(ba.$$.fragment,e),g(ja.$$.fragment,e),g(Ea.$$.fragment,e),g(qa.$$.fragment,e),g(Ta.$$.fragment,e),g(Pa.$$.fragment,e),g(za.$$.fragment,e),g(Ca.$$.fragment,e),g(Da.$$.fragment,e),g(xa.$$.fragment,e),g(Oa.$$.fragment,e),g(Fa.$$.fragment,e),g(Na.$$.fragment,e),g(Ce.$$.fragment,e),g(Ia.$$.fragment,e),g(Ma.$$.fragment,e),g(Ha.$$.fragment,e),g(Ba.$$.fragment,e),g(Ka.$$.fragment,e),pr=!0)},o(e){_(E.$$.fragment,e),_(C.$$.fragment,e),_(Me.$$.fragment,e),_(He.$$.fragment,e),_(Be.$$.fragment,e),_(Ue.$$.fragment,e),_(We.$$.fragment,e),_(Ge.$$.fragment,e),_(Ye.$$.fragment,e),_(Ve.$$.fragment,e),_(me.$$.fragment,e),_(Je.$$.fragment,e),_(Ze.$$.fragment,e),_(ea.$$.fragment,e),_(ta.$$.fragment,e),_(oa.$$.fragment,e),_(ra.$$.fragment,e),_(na.$$.fragment,e),_(la.$$.fragment,e),_(ia.$$.fragment,e),_(pa.$$.fragment,e),_(ma.$$.fragment,e),_(ca.$$.fragment,e),_(da.$$.fragm
ent,e),_($e.$$.fragment,e),_(fa.$$.fragment,e),_(ha.$$.fragment,e),_(ga.$$.fragment,e),_(va.$$.fragment,e),_($a.$$.fragment,e),_(ba.$$.fragment,e),_(ja.$$.fragment,e),_(Ea.$$.fragment,e),_(qa.$$.fragment,e),_(Ta.$$.fragment,e),_(Pa.$$.fragment,e),_(za.$$.fragment,e),_(Ca.$$.fragment,e),_(Da.$$.fragment,e),_(xa.$$.fragment,e),_(Oa.$$.fragment,e),_(Fa.$$.fragment,e),_(Na.$$.fragment,e),_(Ce.$$.fragment,e),_(Ia.$$.fragment,e),_(Ma.$$.fragment,e),_(Ha.$$.fragment,e),_(Ba.$$.fragment,e),_(Ka.$$.fragment,e),pr=!1},d(e){a($),e&&a(y),e&&a(b),v(E),e&&a(w),v(C,e),e&&a(F),e&&a(Ga),e&&a(xt),e&&a(N),e&&a(St),e&&a(Ya),e&&a(Ot),e&&a(B),v(Me),e&&a(Ft),v(He,e),e&&a(Nt),e&&a(Qa),e&&a(It),e&&a(ne),e&&a(Lt),v(Be,e),e&&a(Mt),e&&a(le),e&&a(Ht),v(Ue,e),e&&a(Rt),e&&a(Va),e&&a(Bt),v(We,e),e&&a(Kt),e&&a(Ja),e&&a(Ut),e&&a(K),v(Ge),e&&a(Wt),v(Ye,e),e&&a(Gt),e&&a(I),e&&a(Yt),e&&a(pe),e&&a(Qt),v(Ve,e),e&&a(Vt),v(me,e),e&&a(Jt),e&&a(U),v(Je),e&&a(Xt),e&&a(L),e&&a(Zt),e&&a(Za),e&&a(eo),v(Ze,e),e&&a(ao),e&&a(W),v(ea),e&&a(so),e&&a(q),e&&a(to),v(ta,e),e&&a(oo),e&&a(D),e&&a(ro),v(oa,e),e&&a(no),e&&a(ue),e&&a(lo),v(ra,e),e&&a(io),e&&a(G),v(na),e&&a(po),e&&a(he),e&&a(mo),v(la,e),e&&a(co),e&&a(ge),e&&a(uo),v(ia,e),e&&a(fo),e&&a(es),e&&a(ho),e&&a(Y),v(pa),e&&a(go),v(ma,e),e&&a(_o),e&&a(as),e&&a(vo),e&&a(Q),v(ca),e&&a($o),e&&a(M),e&&a(bo),v(da,e),e&&a(jo),v($e,e),e&&a(wo),e&&a(x),e&&a(ko),v(fa,e),e&&a(Eo),e&&a(V),v(ha),e&&a(yo),e&&a(ss),e&&a(qo),v(ga,e),e&&a(Ao),e&&a(je),e&&a(To),v(va,e),e&&a(Po),e&&a(ts),e&&a(zo),e&&a(J),v($a),e&&a(Co),v(ba,e),e&&a(Do),e&&a(ke),e&&a(xo),e&&a(os),e&&a(So),v(ja,e),e&&a(Oo),e&&a(Ee),e&&a(Fo),e&&a(H),v(Ea),v(qa),v(Ta),e&&a(No),e&&a(rs),e&&a(Io),v(Pa,e),e&&a(Lo),e&&a(Z),v(za),e&&a(Mo),e&&a(qe),e&&a(Ho),v(Ca,e),e&&a(Ro),e&&a(ns),e&&a(Bo),v(Da,e),e&&a(Ko),e&&a(ee),v(xa),e&&a(Uo),e&&a(Te),e&&a(Wo),v(Oa,e),e&&a(Go),e&&a(Pe),e&&a(Yo),v(Fa,e),e&&a(Qo),e&&a(ze),e&&a(Vo),v(Na,e),e&&a(Jo),v(Ce,e),e&&a(Xo),e&&a(ls),e&&a(Zo),e&&a(ae),v(Ia),e&&a(er),e&&a(xe),e&&a(ar),v(Ma,e),e&&a(sr),e&&
a(se),v(Ha),e&&a(tr),e&&a(R),e&&a(or),v(Ba,e),e&&a(rr),e&&a(is),e&&a(nr),e&&a(te),v(Ka),e&&a(lr),e&&a(ps),e&&a(ir),e&&a(Fe)}}}const Gm={local:"finetuning-de-um-modelo-prtreinado",sections:[{local:"preparando-um-dataset",title:"Preparando um dataset"},{local:"finetuning-com-o-trainer",sections:[{local:"hiperparmetros-de-treinamento",title:"Hiperpar\xE2metros de treinamento"},{local:"mtricas",title:"M\xE9tricas"},{local:"trainer",title:"Trainer"}],title:"Fine-tuning com o `Trainer`"},{local:"finetuning-com-keras",sections:[{local:"converso-do-dataset-ao-formato-do-tensorflow",title:"Convers\xE3o do dataset ao formato do TensorFlow"},{local:"compilao-e-ajustes",title:"Compila\xE7\xE3o e ajustes"}],title:"Fine-tuning com Keras"},{local:"finetune-em-pytorch-nativo",sections:[{local:"dataloader",title:"DataLoader"},{local:"otimizao-e-configurao-do-learning-rate",title:"Otimiza\xE7\xE3o e configura\xE7\xE3o do Learning Rate"},{local:"ciclo-de-treinamento",title:"Ciclo de treinamento"},{local:"mtricas",title:"M\xE9tricas"}],title:"Fine-tune em PyTorch nativo"},{local:"recursos-adicionais",title:"Recursos adicionais"}],title:"Fine-tuning de um modelo pr\xE9-treinado"};function Ym(oe){return Hm(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class ac extends Nm{constructor($){super();Im(this,$,Ym,Wm,Lm,{})}}export{ac as default,Gm as metadata};
486
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/multilingual.mdx-hf-doc-builder.js
import{S as Yr,i as Zr,s as en,e as t,k as d,w as u,t as i,M as on,c as s,d as a,m as p,a as l,x as f,h as r,b as c,G as o,g as m,y as g,L as an,q as h,o as _,B as v,v as tn}from"../chunks/vendor-hf-doc-builder.js";import{I as re}from"../chunks/IconCopyLink-hf-doc-builder.js";import{C as z}from"../chunks/CodeBlock-hf-doc-builder.js";import{D as sn}from"../chunks/DocNotebookDropdown-hf-doc-builder.js";function ln(wi){let L,fa,O,S,po,ne,Lt,co,Ot,ga,me,ha,y,Ct,uo,Tt,Pt,de,Dt,Xt,_a,C,I,fo,pe,At,go,Nt,va,Le,St,ba,T,B,ho,ce,It,_o,Bt,Ea,Oe,Rt,ka,b,Ce,vo,Ft,Ht,Gt,Te,bo,Wt,Ut,Jt,Pe,Eo,Kt,Qt,Vt,De,ko,Yt,Zt,es,Xe,$o,os,as,ts,Ae,Mo,ss,ls,is,Ne,xo,rs,ns,$a,$,ms,wo,ds,ps,zo,cs,us,yo,fs,gs,Ma,R,hs,qo,_s,vs,xa,ue,wa,F,bs,jo,Es,ks,za,fe,ya,Se,$s,qa,ge,ja,M,Ms,Lo,xs,ws,Oo,zs,ys,Co,qs,js,La,he,Oa,H,Ls,To,Os,Cs,Ca,_e,Ta,q,Ts,ve,Ps,Ds,Po,Xs,As,Pa,P,G,Do,be,Ns,Xo,Ss,Da,Ie,Is,Xa,W,Be,Ao,Bs,Rs,Fs,Re,No,Hs,Gs,Aa,Fe,Ws,Na,D,U,So,Ee,Us,Io,Js,Sa,He,Ks,Ia,J,Ge,Bo,Qs,Vs,Ys,We,Ro,Zs,el,Ba,Ue,ol,Ra,X,K,Fo,ke,al,Ho,tl,Fa,Je,sl,Ha,Q,Ke,Go,ll,il,rl,Qe,Wo,nl,ml,Ga,Ve,dl,Wa,A,V,Uo,$e,pl,Jo,cl,Ua,Ye,ul,Ja,Y,Ze,Ko,fl,gl,hl,eo,Qo,_l,vl,Ka,Z,bl,Vo,El,kl,Qa,Me,Va,oo,$l,Ya,xe,Za,x,Ml,Yo,xl,wl,Zo,zl,yl,ea,ql,jl,et,we,ot,N,ee,oa,ze,Ll,aa,Ol,at,ao,Cl,tt,E,to,ta,Tl,Pl,Dl,so,sa,Xl,Al,Nl,lo,la,Sl,Il,Bl,io,ia,Rl,Fl,Hl,ra,na,Gl,st,oe,Wl,ma,Ul,Jl,lt,ye,it,ro,Kl,rt,qe,nt,w,Ql,da,Vl,Yl,pa,Zl,ei,ca,oi,ai,mt,je,dt,ae,ti,ua,si,li,pt;return ne=new re({}),me=new sn({props:{classNames:"absolute z-10 right-0 
top-0",options:[{label:"Mixed",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/pt/multilingual.ipynb"},{label:"PyTorch",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/pt/pytorch/multilingual.ipynb"},{label:"TensorFlow",value:"https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/pt/tensorflow/multilingual.ipynb"},{label:"Mixed",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/pt/multilingual.ipynb"},{label:"PyTorch",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/pt/pytorch/multilingual.ipynb"},{label:"TensorFlow",value:"https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/pt/tensorflow/multilingual.ipynb"}]}}),pe=new re({}),ce=new re({}),ue=new z({props:{code:`import torch from transformers import XLMTokenizer, XLMWithLMHeadModel tokenizer = XLMTokenizer.from_pretrained("xlm-clm-enfr-1024") model = XLMWithLMHeadModel.from_pretrained("xlm-clm-enfr-1024")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> XLMTokenizer, XLMWithLMHeadModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = XLMTokenizer.from_pretrained(<span class="hljs-string">&quot;xlm-clm-enfr-1024&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = XLMWithLMHeadModel.from_pretrained(<span class="hljs-string">&quot;xlm-clm-enfr-1024&quot;</span>)`}}),fe=new z({props:{code:"print(tokenizer.lang2id)",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer.lang2id) {<span class="hljs-string">&#x27;en&#x27;</span>: <span class="hljs-number">0</span>, <span 
class="hljs-string">&#x27;fr&#x27;</span>: <span class="hljs-number">1</span>}`}}),ge=new z({props:{code:'input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # batch size of 1',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>input_ids = torch.tensor([tokenizer.encode(<span class="hljs-string">&quot;Wikipedia was used to&quot;</span>)]) <span class="hljs-comment"># batch size of 1</span>'}}),he=new z({props:{code:`language_id = tokenizer.lang2id["en"] # 0 langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0]) # We reshape it to be of size (batch_size, sequence_length) langs = langs.view(1, -1) # is now of shape [1, sequence_length] (we have a batch size of 1)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>language_id = tokenizer.lang2id[<span class="hljs-string">&quot;en&quot;</span>] <span class="hljs-comment"># 0</span> <span class="hljs-meta">&gt;&gt;&gt; </span>langs = torch.tensor([language_id] * input_ids.shape[<span class="hljs-number">1</span>]) <span class="hljs-comment"># torch.tensor([0, 0, 0, ..., 0])</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># We reshape it to be of size (batch_size, sequence_length)</span> <span class="hljs-meta">&gt;&gt;&gt; </span>langs = langs.view(<span class="hljs-number">1</span>, -<span class="hljs-number">1</span>) <span class="hljs-comment"># is now of shape [1, sequence_length] (we have a batch size of 1)</span>`}}),_e=new z({props:{code:"outputs = model(input_ids, langs=langs)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>outputs = model(input_ids, langs=langs)'}}),be=new re({}),Ee=new re({}),ke=new re({}),$e=new re({}),Me=new z({props:{code:`from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." 
chinese_text = "\u4E0D\u8981\u63D2\u624B\u5DEB\u5E2B\u7684\u4E8B\u52D9, \u56E0\u70BA\u4ED6\u5011\u662F\u5FAE\u5999\u7684, \u5F88\u5FEB\u5C31\u6703\u767C\u6012." tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh") model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> M2M100ForConditionalGeneration, M2M100Tokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>en_text = <span class="hljs-string">&quot;Do not meddle in the affairs of wizards, for they are subtle and quick to anger.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>chinese_text = <span class="hljs-string">&quot;\u4E0D\u8981\u63D2\u624B\u5DEB\u5E2B\u7684\u4E8B\u52D9, \u56E0\u70BA\u4ED6\u5011\u662F\u5FAE\u5999\u7684, \u5F88\u5FEB\u5C31\u6703\u767C\u6012.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = M2M100Tokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/m2m100_418M&quot;</span>, src_lang=<span class="hljs-string">&quot;zh&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = M2M100ForConditionalGeneration.from_pretrained(<span class="hljs-string">&quot;facebook/m2m100_418M&quot;</span>)`}}),xe=new z({props:{code:'encoded_zh = tokenizer(chinese_text, return_tensors="pt")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>encoded_zh = tokenizer(chinese_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)'}}),we=new z({props:{code:`generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en")) tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id(<span class="hljs-string">&quot;en&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; 
</span>tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&#x27;Do not interfere with the matters of the witches, because they are delicate and will soon be angry.&#x27;</span>`}}),ze=new re({}),ye=new z({props:{code:`from transformers import AutoTokenizer, AutoModelForSeq2SeqLM en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." fi_text = "\xC4l\xE4 sekaannu velhojen asioihin, sill\xE4 ne ovat hienovaraisia ja nopeasti vihaisia." tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="fi_FI") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; </span>en_text = <span class="hljs-string">&quot;Do not meddle in the affairs of wizards, for they are subtle and quick to anger.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>fi_text = <span class="hljs-string">&quot;\xC4l\xE4 sekaannu velhojen asioihin, sill\xE4 ne ovat hienovaraisia ja nopeasti vihaisia.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>, src_lang=<span class="hljs-string">&quot;fi_FI&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;facebook/mbart-large-50-many-to-many-mmt&quot;</span>)`}}),qe=new z({props:{code:'encoded_en = tokenizer(en_text, return_tensors="pt")',highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>encoded_en = tokenizer(en_text, return_tensors=<span class="hljs-string">&quot;pt&quot;</span>)'}}),je=new z({props:{code:`generated_tokens = 
model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id("en_XX")) tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id(<span class="hljs-string">&quot;en_XX&quot;</span>)) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.batch_decode(generated_tokens, skip_special_tokens=<span class="hljs-literal">True</span>) <span class="hljs-string">&quot;Don&#x27;t interfere with the wizard&#x27;s affairs, because they are subtle, will soon get angry.&quot;</span>`}}),{c(){L=t("meta"),fa=d(),O=t("h1"),S=t("a"),po=t("span"),u(ne.$$.fragment),Lt=d(),co=t("span"),Ot=i("Modelos multilingu\xEDsticos para infer\xEAncia"),ga=d(),u(me.$$.fragment),ha=d(),y=t("p"),Ct=i(`Existem v\xE1rios modelos multilingu\xEDsticos no \u{1F917} Transformers e seus usos para infer\xEAncia diferem dos modelos monol\xEDngues. No entanto, nem `),uo=t("em"),Tt=i("todos"),Pt=i(` os usos dos modelos multil\xEDngues s\xE3o t\xE3o diferentes. Alguns modelos, como o `),de=t("a"),Dt=i("bert-base-multilingual-uncased"),Xt=i(`, podem ser usados como se fossem monol\xEDngues. Este guia ir\xE1 te ajudar a usar modelos multil\xEDngues cujo uso difere para o prop\xF3sito de infer\xEAncia.`),_a=d(),C=t("h2"),I=t("a"),fo=t("span"),u(pe.$$.fragment),At=d(),go=t("span"),Nt=i("XLM"),va=d(),Le=t("p"),St=i(`O XLM tem dez checkpoints diferentes dos quais apenas um \xE9 monol\xEDngue. 
Os nove checkpoints restantes do modelo s\xE3o subdivididos em duas categorias: checkpoints que usam de language embeddings e os que n\xE3o.`),ba=d(),T=t("h3"),B=t("a"),ho=t("span"),u(ce.$$.fragment),It=d(),_o=t("span"),Bt=i("XLM com language embeddings"),Ea=d(),Oe=t("p"),Rt=i("Os seguintes modelos de XLM usam language embeddings para especificar a linguagem utilizada para a infer\xEAncia."),ka=d(),b=t("ul"),Ce=t("li"),vo=t("code"),Ft=i("xlm-mlm-ende-1024"),Ht=i(" (Masked language modeling, English-German)"),Gt=d(),Te=t("li"),bo=t("code"),Wt=i("xlm-mlm-enfr-1024"),Ut=i(" (Masked language modeling, English-French)"),Jt=d(),Pe=t("li"),Eo=t("code"),Kt=i("xlm-mlm-enro-1024"),Qt=i(" (Masked language modeling, English-Romanian)"),Vt=d(),De=t("li"),ko=t("code"),Yt=i("xlm-mlm-xnli15-1024"),Zt=i(" (Masked language modeling, XNLI languages)"),es=d(),Xe=t("li"),$o=t("code"),os=i("xlm-mlm-tlm-xnli15-1024"),as=i(" (Masked language modeling + translation, XNLI languages)"),ts=d(),Ae=t("li"),Mo=t("code"),ss=i("xlm-clm-enfr-1024"),ls=i(" (Causal language modeling, English-French)"),is=d(),Ne=t("li"),xo=t("code"),rs=i("xlm-clm-ende-1024"),ns=i(" (Causal language modeling, English-German)"),$a=d(),$=t("p"),ms=i("Os language embeddings s\xE3o representados por um tensor de mesma dimens\xE3o que os "),wo=t("code"),ds=i("input_ids"),ps=i(` passados ao modelo. 
Os valores destes tensores dependem do idioma utilizado e se identificam pelos atributos `),zo=t("code"),cs=i("lang2id"),us=i(" e "),yo=t("code"),fs=i("id2lang"),gs=i(" do tokenizador."),Ma=d(),R=t("p"),hs=i("Neste exemplo, carregamos o checkpoint "),qo=t("code"),_s=i("xlm-clm-enfr-1024"),vs=i("(Causal language modeling, English-French):"),xa=d(),u(ue.$$.fragment),wa=d(),F=t("p"),bs=i("O atributo "),jo=t("code"),Es=i("lang2id"),ks=i(" do tokenizador mostra os idiomas deste modelo e seus ids:"),za=d(),u(fe.$$.fragment),ya=d(),Se=t("p"),$s=i("Em seguida, cria-se um input de exemplo:"),qa=d(),u(ge.$$.fragment),ja=d(),M=t("p"),Ms=i("Estabelece-se o id do idioma, por exemplo "),Lo=t("code"),xs=i('"en"'),ws=i(`, e utiliza-se o mesmo para definir a language embedding. A language embedding \xE9 um tensor preenchido com `),Oo=t("code"),zs=i("0"),ys=i(`, que \xE9 o id de idioma para o ingl\xEAs. Este tensor deve ser do mesmo tamanho que os `),Co=t("code"),qs=i("input_ids"),js=i("."),La=d(),u(he.$$.fragment),Oa=d(),H=t("p"),Ls=i("Agora voc\xEA pode passar os "),To=t("code"),Os=i("input_ids"),Cs=i(" e a language embedding ao modelo:"),Ca=d(),u(_e.$$.fragment),Ta=d(),q=t("p"),Ts=i("O script "),ve=t("a"),Ps=i("run_generation.py"),Ds=i(" pode gerar um texto com language embeddings utilizando os checkpoints "),Po=t("code"),Xs=i("xlm-clm"),As=i("."),Pa=d(),P=t("h3"),G=t("a"),Do=t("span"),u(be.$$.fragment),Ns=d(),Xo=t("span"),Ss=i("XLM sem language embeddings"),Da=d(),Ie=t("p"),Is=i("Os seguintes modelos XLM n\xE3o requerem o uso de language embeddings durante a infer\xEAncia:"),Xa=d(),W=t("ul"),Be=t("li"),Ao=t("code"),Bs=i("xlm-mlm-17-1280"),Rs=i(" (Modelagem de linguagem com m\xE1scara, 17 idiomas)"),Fs=d(),Re=t("li"),No=t("code"),Hs=i("xlm-mlm-100-1280"),Gs=i(" (Modelagem de linguagem com m\xE1scara, 100 idiomas)"),Aa=d(),Fe=t("p"),Ws=i("Estes modelos s\xE3o utilizados para representa\xE7\xF5es gen\xE9ricas de frase diferentemente dos checkpoints XLM 
anteriores."),Na=d(),D=t("h2"),U=t("a"),So=t("span"),u(Ee.$$.fragment),Us=d(),Io=t("span"),Js=i("BERT"),Sa=d(),He=t("p"),Ks=i("Os seguintes modelos do BERT podem ser utilizados para tarefas multilingu\xEDsticas:"),Ia=d(),J=t("ul"),Ge=t("li"),Bo=t("code"),Qs=i("bert-base-multilingual-uncased"),Vs=i(" (Modelagem de linguagem com m\xE1scara + Previs\xE3o de frases, 102 idiomas)"),Ys=d(),We=t("li"),Ro=t("code"),Zs=i("bert-base-multilingual-cased"),el=i(" (Modelagem de linguagem com m\xE1scara + Previs\xE3o de frases, 104 idiomas)"),Ba=d(),Ue=t("p"),ol=i(`Estes modelos n\xE3o requerem language embeddings durante a infer\xEAncia. Devem identificar a linguagem a partir do contexto e realizar a infer\xEAncia em sequ\xEAncia.`),Ra=d(),X=t("h2"),K=t("a"),Fo=t("span"),u(ke.$$.fragment),al=d(),Ho=t("span"),tl=i("XLM-RoBERTa"),Fa=d(),Je=t("p"),sl=i("Os seguintes modelos do XLM-RoBERTa podem ser utilizados para tarefas multilingu\xEDsticas:"),Ha=d(),Q=t("ul"),Ke=t("li"),Go=t("code"),ll=i("xlm-roberta-base"),il=i(" (Modelagem de linguagem com m\xE1scara, 100 idiomas)"),rl=d(),Qe=t("li"),Wo=t("code"),nl=i("xlm-roberta-large"),ml=i(" Modelagem de linguagem com m\xE1scara, 100 idiomas)"),Ga=d(),Ve=t("p"),dl=i(`O XLM-RoBERTa foi treinado com 2,5 TB de dados do CommonCrawl rec\xE9m-criados e testados em 100 idiomas. 
Proporciona fortes vantagens sobre os modelos multilingu\xEDsticos publicados anteriormente como o mBERT e o XLM em tarefas subsequentes como a classifica\xE7\xE3o, a rotulagem de sequ\xEAncias e \xE0 respostas a perguntas.`),Wa=d(),A=t("h2"),V=t("a"),Uo=t("span"),u($e.$$.fragment),pl=d(),Jo=t("span"),cl=i("M2M100"),Ua=d(),Ye=t("p"),ul=i("Os seguintes modelos de M2M100 podem ser utilizados para tradu\xE7\xF5es multilingu\xEDsticas:"),Ja=d(),Y=t("ul"),Ze=t("li"),Ko=t("code"),fl=i("facebook/m2m100_418M"),gl=i(" (Tradu\xE7\xE3o)"),hl=d(),eo=t("li"),Qo=t("code"),_l=i("facebook/m2m100_1.2B"),vl=i(" (Tradu\xE7\xE3o)"),Ka=d(),Z=t("p"),bl=i("Neste exemplo, o checkpoint "),Vo=t("code"),El=i("facebook/m2m100_418M"),kl=i(` \xE9 carregado para traduzir do mandarim ao ingl\xEAs. \xC9 poss\xEDvel estabelecer o idioma de origem no tokenizador:`),Qa=d(),u(Me.$$.fragment),Va=d(),oo=t("p"),$l=i("Tokeniza\xE7\xE3o do texto:"),Ya=d(),u(xe.$$.fragment),Za=d(),x=t("p"),Ml=i(`O M2M100 for\xE7a o id do idioma de destino como o primeiro token gerado para traduzir ao idioma de destino. 
\xC9 definido o `),Yo=t("code"),xl=i("forced_bos_token_id"),wl=i(" como "),Zo=t("code"),zl=i("en"),yl=i(" no m\xE9todo "),ea=t("code"),ql=i("generate"),jl=i(" para traduzir ao ingl\xEAs."),et=d(),u(we.$$.fragment),ot=d(),N=t("h2"),ee=t("a"),oa=t("span"),u(ze.$$.fragment),Ll=d(),aa=t("span"),Ol=i("MBart"),at=d(),ao=t("p"),Cl=i("Os seguintes modelos do MBart podem ser utilizados para tradu\xE7\xE3o multilingu\xEDstica:"),tt=d(),E=t("ul"),to=t("li"),ta=t("code"),Tl=i("facebook/mbart-large-50-one-to-many-mmt"),Pl=i(" (Tradu\xE7\xE3o autom\xE1tica multilingu\xEDstica de um a v\xE1rios, 50 idiomas)"),Dl=d(),so=t("li"),sa=t("code"),Xl=i("facebook/mbart-large-50-many-to-many-mmt"),Al=i(" (Tradu\xE7\xE3o autom\xE1tica multilingu\xEDstica de v\xE1rios a v\xE1rios, 50 idiomas)"),Nl=d(),lo=t("li"),la=t("code"),Sl=i("facebook/mbart-large-50-many-to-one-mmt"),Il=i(" (Tradu\xE7\xE3o autom\xE1tica multilingu\xEDstica v\xE1rios a um, 50 idiomas)"),Bl=d(),io=t("li"),ia=t("code"),Rl=i("facebook/mbart-large-50"),Fl=i(" (Tradu\xE7\xE3o multilingu\xEDstica, 50 idiomas)"),Hl=d(),ra=t("li"),na=t("code"),Gl=i("facebook/mbart-large-cc25"),st=d(),oe=t("p"),Wl=i("Neste exemplo, carrega-se o checkpoint "),ma=t("code"),Ul=i("facebook/mbart-large-50-many-to-many-mmt"),Jl=i(` para traduzir do finland\xEAs ao ingl\xEAs. Pode-se definir o idioma de origem no tokenizador:`),lt=d(),u(ye.$$.fragment),it=d(),ro=t("p"),Kl=i("Tokenizando o texto:"),rt=d(),u(qe.$$.fragment),nt=d(),w=t("p"),Ql=i(`O MBart for\xE7a o id do idioma de destino como o primeiro token gerado para traduzir ao idioma de destino. 
\xC9 definido o `),da=t("code"),Vl=i("forced_bos_token_id"),Yl=i(" como "),pa=t("code"),Zl=i("en"),ei=i(" no m\xE9todo "),ca=t("code"),oi=i("generate"),ai=i(" para traduzir ao ingl\xEAs."),mt=d(),u(je.$$.fragment),dt=d(),ae=t("p"),ti=i("Se estiver usando o checkpoint "),ua=t("code"),si=i("facebook/mbart-large-50-many-to-one-mmt"),li=i(` n\xE3o ser\xE1 necess\xE1rio for\xE7ar o id do idioma de destino como sendo o primeiro token generado, caso contr\xE1rio a usagem \xE9 a mesma.`),this.h()},l(e){const n=on('[data-svelte="svelte-1phssyn"]',document.head);L=s(n,"META",{name:!0,content:!0}),n.forEach(a),fa=p(e),O=s(e,"H1",{class:!0});var ct=l(O);S=s(ct,"A",{id:!0,class:!0,href:!0});var zi=l(S);po=s(zi,"SPAN",{});var yi=l(po);f(ne.$$.fragment,yi),yi.forEach(a),zi.forEach(a),Lt=p(ct),co=s(ct,"SPAN",{});var qi=l(co);Ot=r(qi,"Modelos multilingu\xEDsticos para infer\xEAncia"),qi.forEach(a),ct.forEach(a),ga=p(e),f(me.$$.fragment,e),ha=p(e),y=s(e,"P",{});var no=l(y);Ct=r(no,`Existem v\xE1rios modelos multilingu\xEDsticos no \u{1F917} Transformers e seus usos para infer\xEAncia diferem dos modelos monol\xEDngues. No entanto, nem `),uo=s(no,"EM",{});var ji=l(uo);Tt=r(ji,"todos"),ji.forEach(a),Pt=r(no,` os usos dos modelos multil\xEDngues s\xE3o t\xE3o diferentes. Alguns modelos, como o `),de=s(no,"A",{href:!0,rel:!0});var Li=l(de);Dt=r(Li,"bert-base-multilingual-uncased"),Li.forEach(a),Xt=r(no,`, podem ser usados como se fossem monol\xEDngues. Este guia ir\xE1 te ajudar a usar modelos multil\xEDngues cujo uso difere para o prop\xF3sito de infer\xEAncia.`),no.forEach(a),_a=p(e),C=s(e,"H2",{class:!0});var ut=l(C);I=s(ut,"A",{id:!0,class:!0,href:!0});var Oi=l(I);fo=s(Oi,"SPAN",{});var Ci=l(fo);f(pe.$$.fragment,Ci),Ci.forEach(a),Oi.forEach(a),At=p(ut),go=s(ut,"SPAN",{});var Ti=l(go);Nt=r(Ti,"XLM"),Ti.forEach(a),ut.forEach(a),va=p(e),Le=s(e,"P",{});var Pi=l(Le);St=r(Pi,`O XLM tem dez checkpoints diferentes dos quais apenas um \xE9 monol\xEDngue. 
Os nove checkpoints restantes do modelo s\xE3o subdivididos em duas categorias: checkpoints que usam de language embeddings e os que n\xE3o.`),Pi.forEach(a),ba=p(e),T=s(e,"H3",{class:!0});var ft=l(T);B=s(ft,"A",{id:!0,class:!0,href:!0});var Di=l(B);ho=s(Di,"SPAN",{});var Xi=l(ho);f(ce.$$.fragment,Xi),Xi.forEach(a),Di.forEach(a),It=p(ft),_o=s(ft,"SPAN",{});var Ai=l(_o);Bt=r(Ai,"XLM com language embeddings"),Ai.forEach(a),ft.forEach(a),Ea=p(e),Oe=s(e,"P",{});var Ni=l(Oe);Rt=r(Ni,"Os seguintes modelos de XLM usam language embeddings para especificar a linguagem utilizada para a infer\xEAncia."),Ni.forEach(a),ka=p(e),b=s(e,"UL",{});var k=l(b);Ce=s(k,"LI",{});var ii=l(Ce);vo=s(ii,"CODE",{});var Si=l(vo);Ft=r(Si,"xlm-mlm-ende-1024"),Si.forEach(a),Ht=r(ii," (Masked language modeling, English-German)"),ii.forEach(a),Gt=p(k),Te=s(k,"LI",{});var ri=l(Te);bo=s(ri,"CODE",{});var Ii=l(bo);Wt=r(Ii,"xlm-mlm-enfr-1024"),Ii.forEach(a),Ut=r(ri," (Masked language modeling, English-French)"),ri.forEach(a),Jt=p(k),Pe=s(k,"LI",{});var ni=l(Pe);Eo=s(ni,"CODE",{});var Bi=l(Eo);Kt=r(Bi,"xlm-mlm-enro-1024"),Bi.forEach(a),Qt=r(ni," (Masked language modeling, English-Romanian)"),ni.forEach(a),Vt=p(k),De=s(k,"LI",{});var mi=l(De);ko=s(mi,"CODE",{});var Ri=l(ko);Yt=r(Ri,"xlm-mlm-xnli15-1024"),Ri.forEach(a),Zt=r(mi," (Masked language modeling, XNLI languages)"),mi.forEach(a),es=p(k),Xe=s(k,"LI",{});var di=l(Xe);$o=s(di,"CODE",{});var Fi=l($o);os=r(Fi,"xlm-mlm-tlm-xnli15-1024"),Fi.forEach(a),as=r(di," (Masked language modeling + translation, XNLI languages)"),di.forEach(a),ts=p(k),Ae=s(k,"LI",{});var pi=l(Ae);Mo=s(pi,"CODE",{});var Hi=l(Mo);ss=r(Hi,"xlm-clm-enfr-1024"),Hi.forEach(a),ls=r(pi," (Causal language modeling, English-French)"),pi.forEach(a),is=p(k),Ne=s(k,"LI",{});var ci=l(Ne);xo=s(ci,"CODE",{});var Gi=l(xo);rs=r(Gi,"xlm-clm-ende-1024"),Gi.forEach(a),ns=r(ci," (Causal language modeling, English-German)"),ci.forEach(a),k.forEach(a),$a=p(e),$=s(e,"P",{});var te=l($);ms=r(te,"Os language 
embeddings s\xE3o representados por um tensor de mesma dimens\xE3o que os "),wo=s(te,"CODE",{});var Wi=l(wo);ds=r(Wi,"input_ids"),Wi.forEach(a),ps=r(te,` passados ao modelo. Os valores destes tensores dependem do idioma utilizado e se identificam pelos atributos `),zo=s(te,"CODE",{});var Ui=l(zo);cs=r(Ui,"lang2id"),Ui.forEach(a),us=r(te," e "),yo=s(te,"CODE",{});var Ji=l(yo);fs=r(Ji,"id2lang"),Ji.forEach(a),gs=r(te," do tokenizador."),te.forEach(a),Ma=p(e),R=s(e,"P",{});var gt=l(R);hs=r(gt,"Neste exemplo, carregamos o checkpoint "),qo=s(gt,"CODE",{});var Ki=l(qo);_s=r(Ki,"xlm-clm-enfr-1024"),Ki.forEach(a),vs=r(gt,"(Causal language modeling, English-French):"),gt.forEach(a),xa=p(e),f(ue.$$.fragment,e),wa=p(e),F=s(e,"P",{});var ht=l(F);bs=r(ht,"O atributo "),jo=s(ht,"CODE",{});var Qi=l(jo);Es=r(Qi,"lang2id"),Qi.forEach(a),ks=r(ht," do tokenizador mostra os idiomas deste modelo e seus ids:"),ht.forEach(a),za=p(e),f(fe.$$.fragment,e),ya=p(e),Se=s(e,"P",{});var Vi=l(Se);$s=r(Vi,"Em seguida, cria-se um input de exemplo:"),Vi.forEach(a),qa=p(e),f(ge.$$.fragment,e),ja=p(e),M=s(e,"P",{});var se=l(M);Ms=r(se,"Estabelece-se o id do idioma, por exemplo "),Lo=s(se,"CODE",{});var Yi=l(Lo);xs=r(Yi,'"en"'),Yi.forEach(a),ws=r(se,`, e utiliza-se o mesmo para definir a language embedding. A language embedding \xE9 um tensor preenchido com `),Oo=s(se,"CODE",{});var Zi=l(Oo);zs=r(Zi,"0"),Zi.forEach(a),ys=r(se,`, que \xE9 o id de idioma para o ingl\xEAs. 
Este tensor deve ser do mesmo tamanho que os `),Co=s(se,"CODE",{});var er=l(Co);qs=r(er,"input_ids"),er.forEach(a),js=r(se,"."),se.forEach(a),La=p(e),f(he.$$.fragment,e),Oa=p(e),H=s(e,"P",{});var _t=l(H);Ls=r(_t,"Agora voc\xEA pode passar os "),To=s(_t,"CODE",{});var or=l(To);Os=r(or,"input_ids"),or.forEach(a),Cs=r(_t," e a language embedding ao modelo:"),_t.forEach(a),Ca=p(e),f(_e.$$.fragment,e),Ta=p(e),q=s(e,"P",{});var mo=l(q);Ts=r(mo,"O script "),ve=s(mo,"A",{href:!0,rel:!0});var ar=l(ve);Ps=r(ar,"run_generation.py"),ar.forEach(a),Ds=r(mo," pode gerar um texto com language embeddings utilizando os checkpoints "),Po=s(mo,"CODE",{});var tr=l(Po);Xs=r(tr,"xlm-clm"),tr.forEach(a),As=r(mo,"."),mo.forEach(a),Pa=p(e),P=s(e,"H3",{class:!0});var vt=l(P);G=s(vt,"A",{id:!0,class:!0,href:!0});var sr=l(G);Do=s(sr,"SPAN",{});var lr=l(Do);f(be.$$.fragment,lr),lr.forEach(a),sr.forEach(a),Ns=p(vt),Xo=s(vt,"SPAN",{});var ir=l(Xo);Ss=r(ir,"XLM sem language embeddings"),ir.forEach(a),vt.forEach(a),Da=p(e),Ie=s(e,"P",{});var rr=l(Ie);Is=r(rr,"Os seguintes modelos XLM n\xE3o requerem o uso de language embeddings durante a infer\xEAncia:"),rr.forEach(a),Xa=p(e),W=s(e,"UL",{});var bt=l(W);Be=s(bt,"LI",{});var ui=l(Be);Ao=s(ui,"CODE",{});var nr=l(Ao);Bs=r(nr,"xlm-mlm-17-1280"),nr.forEach(a),Rs=r(ui," (Modelagem de linguagem com m\xE1scara, 17 idiomas)"),ui.forEach(a),Fs=p(bt),Re=s(bt,"LI",{});var fi=l(Re);No=s(fi,"CODE",{});var mr=l(No);Hs=r(mr,"xlm-mlm-100-1280"),mr.forEach(a),Gs=r(fi," (Modelagem de linguagem com m\xE1scara, 100 idiomas)"),fi.forEach(a),bt.forEach(a),Aa=p(e),Fe=s(e,"P",{});var dr=l(Fe);Ws=r(dr,"Estes modelos s\xE3o utilizados para representa\xE7\xF5es gen\xE9ricas de frase diferentemente dos checkpoints XLM anteriores."),dr.forEach(a),Na=p(e),D=s(e,"H2",{class:!0});var Et=l(D);U=s(Et,"A",{id:!0,class:!0,href:!0});var pr=l(U);So=s(pr,"SPAN",{});var cr=l(So);f(Ee.$$.fragment,cr),cr.forEach(a),pr.forEach(a),Us=p(Et),Io=s(Et,"SPAN",{});var 
ur=l(Io);Js=r(ur,"BERT"),ur.forEach(a),Et.forEach(a),Sa=p(e),He=s(e,"P",{});var fr=l(He);Ks=r(fr,"Os seguintes modelos do BERT podem ser utilizados para tarefas multilingu\xEDsticas:"),fr.forEach(a),Ia=p(e),J=s(e,"UL",{});var kt=l(J);Ge=s(kt,"LI",{});var gi=l(Ge);Bo=s(gi,"CODE",{});var gr=l(Bo);Qs=r(gr,"bert-base-multilingual-uncased"),gr.forEach(a),Vs=r(gi," (Modelagem de linguagem com m\xE1scara + Previs\xE3o de frases, 102 idiomas)"),gi.forEach(a),Ys=p(kt),We=s(kt,"LI",{});var hi=l(We);Ro=s(hi,"CODE",{});var hr=l(Ro);Zs=r(hr,"bert-base-multilingual-cased"),hr.forEach(a),el=r(hi," (Modelagem de linguagem com m\xE1scara + Previs\xE3o de frases, 104 idiomas)"),hi.forEach(a),kt.forEach(a),Ba=p(e),Ue=s(e,"P",{});var _r=l(Ue);ol=r(_r,`Estes modelos n\xE3o requerem language embeddings durante a infer\xEAncia. Devem identificar a linguagem a partir do contexto e realizar a infer\xEAncia em sequ\xEAncia.`),_r.forEach(a),Ra=p(e),X=s(e,"H2",{class:!0});var $t=l(X);K=s($t,"A",{id:!0,class:!0,href:!0});var vr=l(K);Fo=s(vr,"SPAN",{});var br=l(Fo);f(ke.$$.fragment,br),br.forEach(a),vr.forEach(a),al=p($t),Ho=s($t,"SPAN",{});var Er=l(Ho);tl=r(Er,"XLM-RoBERTa"),Er.forEach(a),$t.forEach(a),Fa=p(e),Je=s(e,"P",{});var kr=l(Je);sl=r(kr,"Os seguintes modelos do XLM-RoBERTa podem ser utilizados para tarefas multilingu\xEDsticas:"),kr.forEach(a),Ha=p(e),Q=s(e,"UL",{});var Mt=l(Q);Ke=s(Mt,"LI",{});var _i=l(Ke);Go=s(_i,"CODE",{});var $r=l(Go);ll=r($r,"xlm-roberta-base"),$r.forEach(a),il=r(_i," (Modelagem de linguagem com m\xE1scara, 100 idiomas)"),_i.forEach(a),rl=p(Mt),Qe=s(Mt,"LI",{});var vi=l(Qe);Wo=s(vi,"CODE",{});var Mr=l(Wo);nl=r(Mr,"xlm-roberta-large"),Mr.forEach(a),ml=r(vi," Modelagem de linguagem com m\xE1scara, 100 idiomas)"),vi.forEach(a),Mt.forEach(a),Ga=p(e),Ve=s(e,"P",{});var xr=l(Ve);dl=r(xr,`O XLM-RoBERTa foi treinado com 2,5 TB de dados do CommonCrawl rec\xE9m-criados e testados em 100 idiomas. 
Proporciona fortes vantagens sobre os modelos multilingu\xEDsticos publicados anteriormente como o mBERT e o XLM em tarefas subsequentes como a classifica\xE7\xE3o, a rotulagem de sequ\xEAncias e \xE0 respostas a perguntas.`),xr.forEach(a),Wa=p(e),A=s(e,"H2",{class:!0});var xt=l(A);V=s(xt,"A",{id:!0,class:!0,href:!0});var wr=l(V);Uo=s(wr,"SPAN",{});var zr=l(Uo);f($e.$$.fragment,zr),zr.forEach(a),wr.forEach(a),pl=p(xt),Jo=s(xt,"SPAN",{});var yr=l(Jo);cl=r(yr,"M2M100"),yr.forEach(a),xt.forEach(a),Ua=p(e),Ye=s(e,"P",{});var qr=l(Ye);ul=r(qr,"Os seguintes modelos de M2M100 podem ser utilizados para tradu\xE7\xF5es multilingu\xEDsticas:"),qr.forEach(a),Ja=p(e),Y=s(e,"UL",{});var wt=l(Y);Ze=s(wt,"LI",{});var bi=l(Ze);Ko=s(bi,"CODE",{});var jr=l(Ko);fl=r(jr,"facebook/m2m100_418M"),jr.forEach(a),gl=r(bi," (Tradu\xE7\xE3o)"),bi.forEach(a),hl=p(wt),eo=s(wt,"LI",{});var Ei=l(eo);Qo=s(Ei,"CODE",{});var Lr=l(Qo);_l=r(Lr,"facebook/m2m100_1.2B"),Lr.forEach(a),vl=r(Ei," (Tradu\xE7\xE3o)"),Ei.forEach(a),wt.forEach(a),Ka=p(e),Z=s(e,"P",{});var zt=l(Z);bl=r(zt,"Neste exemplo, o checkpoint "),Vo=s(zt,"CODE",{});var Or=l(Vo);El=r(Or,"facebook/m2m100_418M"),Or.forEach(a),kl=r(zt,` \xE9 carregado para traduzir do mandarim ao ingl\xEAs. \xC9 poss\xEDvel estabelecer o idioma de origem no tokenizador:`),zt.forEach(a),Qa=p(e),f(Me.$$.fragment,e),Va=p(e),oo=s(e,"P",{});var Cr=l(oo);$l=r(Cr,"Tokeniza\xE7\xE3o do texto:"),Cr.forEach(a),Ya=p(e),f(xe.$$.fragment,e),Za=p(e),x=s(e,"P",{});var le=l(x);Ml=r(le,`O M2M100 for\xE7a o id do idioma de destino como o primeiro token gerado para traduzir ao idioma de destino. 
\xC9 definido o `),Yo=s(le,"CODE",{});var Tr=l(Yo);xl=r(Tr,"forced_bos_token_id"),Tr.forEach(a),wl=r(le," como "),Zo=s(le,"CODE",{});var Pr=l(Zo);zl=r(Pr,"en"),Pr.forEach(a),yl=r(le," no m\xE9todo "),ea=s(le,"CODE",{});var Dr=l(ea);ql=r(Dr,"generate"),Dr.forEach(a),jl=r(le," para traduzir ao ingl\xEAs."),le.forEach(a),et=p(e),f(we.$$.fragment,e),ot=p(e),N=s(e,"H2",{class:!0});var yt=l(N);ee=s(yt,"A",{id:!0,class:!0,href:!0});var Xr=l(ee);oa=s(Xr,"SPAN",{});var Ar=l(oa);f(ze.$$.fragment,Ar),Ar.forEach(a),Xr.forEach(a),Ll=p(yt),aa=s(yt,"SPAN",{});var Nr=l(aa);Ol=r(Nr,"MBart"),Nr.forEach(a),yt.forEach(a),at=p(e),ao=s(e,"P",{});var Sr=l(ao);Cl=r(Sr,"Os seguintes modelos do MBart podem ser utilizados para tradu\xE7\xE3o multilingu\xEDstica:"),Sr.forEach(a),tt=p(e),E=s(e,"UL",{});var j=l(E);to=s(j,"LI",{});var ki=l(to);ta=s(ki,"CODE",{});var Ir=l(ta);Tl=r(Ir,"facebook/mbart-large-50-one-to-many-mmt"),Ir.forEach(a),Pl=r(ki," (Tradu\xE7\xE3o autom\xE1tica multilingu\xEDstica de um a v\xE1rios, 50 idiomas)"),ki.forEach(a),Dl=p(j),so=s(j,"LI",{});var $i=l(so);sa=s($i,"CODE",{});var Br=l(sa);Xl=r(Br,"facebook/mbart-large-50-many-to-many-mmt"),Br.forEach(a),Al=r($i," (Tradu\xE7\xE3o autom\xE1tica multilingu\xEDstica de v\xE1rios a v\xE1rios, 50 idiomas)"),$i.forEach(a),Nl=p(j),lo=s(j,"LI",{});var Mi=l(lo);la=s(Mi,"CODE",{});var Rr=l(la);Sl=r(Rr,"facebook/mbart-large-50-many-to-one-mmt"),Rr.forEach(a),Il=r(Mi," (Tradu\xE7\xE3o autom\xE1tica multilingu\xEDstica v\xE1rios a um, 50 idiomas)"),Mi.forEach(a),Bl=p(j),io=s(j,"LI",{});var xi=l(io);ia=s(xi,"CODE",{});var Fr=l(ia);Rl=r(Fr,"facebook/mbart-large-50"),Fr.forEach(a),Fl=r(xi," (Tradu\xE7\xE3o multilingu\xEDstica, 50 idiomas)"),xi.forEach(a),Hl=p(j),ra=s(j,"LI",{});var Hr=l(ra);na=s(Hr,"CODE",{});var Gr=l(na);Gl=r(Gr,"facebook/mbart-large-cc25"),Gr.forEach(a),Hr.forEach(a),j.forEach(a),st=p(e),oe=s(e,"P",{});var qt=l(oe);Wl=r(qt,"Neste exemplo, carrega-se o checkpoint "),ma=s(qt,"CODE",{});var 
Wr=l(ma);Ul=r(Wr,"facebook/mbart-large-50-many-to-many-mmt"),Wr.forEach(a),Jl=r(qt,` para traduzir do finland\xEAs ao ingl\xEAs. Pode-se definir o idioma de origem no tokenizador:`),qt.forEach(a),lt=p(e),f(ye.$$.fragment,e),it=p(e),ro=s(e,"P",{});var Ur=l(ro);Kl=r(Ur,"Tokenizando o texto:"),Ur.forEach(a),rt=p(e),f(qe.$$.fragment,e),nt=p(e),w=s(e,"P",{});var ie=l(w);Ql=r(ie,`O MBart for\xE7a o id do idioma de destino como o primeiro token gerado para traduzir ao idioma de destino. \xC9 definido o `),da=s(ie,"CODE",{});var Jr=l(da);Vl=r(Jr,"forced_bos_token_id"),Jr.forEach(a),Yl=r(ie," como "),pa=s(ie,"CODE",{});var Kr=l(pa);Zl=r(Kr,"en"),Kr.forEach(a),ei=r(ie," no m\xE9todo "),ca=s(ie,"CODE",{});var Qr=l(ca);oi=r(Qr,"generate"),Qr.forEach(a),ai=r(ie," para traduzir ao ingl\xEAs."),ie.forEach(a),mt=p(e),f(je.$$.fragment,e),dt=p(e),ae=s(e,"P",{});var jt=l(ae);ti=r(jt,"Se estiver usando o checkpoint "),ua=s(jt,"CODE",{});var Vr=l(ua);si=r(Vr,"facebook/mbart-large-50-many-to-one-mmt"),Vr.forEach(a),li=r(jt,` n\xE3o ser\xE1 necess\xE1rio for\xE7ar o id do idioma de destino como sendo o primeiro token generado, caso contr\xE1rio a usagem \xE9 a mesma.`),jt.forEach(a),this.h()},h(){c(L,"name","hf:doc:metadata"),c(L,"content",JSON.stringify(rn)),c(S,"id","modelos-multilingusticos-para-inferncia"),c(S,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(S,"href","#modelos-multilingusticos-para-inferncia"),c(O,"class","relative group"),c(de,"href","https://huggingface.co/bert-base-multilingual-uncased"),c(de,"rel","nofollow"),c(I,"id","xlm"),c(I,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(I,"href","#xlm"),c(C,"class","relative group"),c(B,"id","xlm-com-language-embeddings"),c(B,"class","header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(B,"href","#xlm-com-language-embeddings"),c(T,"class","relative group"),c(ve,"href","https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-generation/run_generation.py"),c(ve,"rel","nofollow"),c(G,"id","xlm-sem-language-embeddings"),c(G,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(G,"href","#xlm-sem-language-embeddings"),c(P,"class","relative group"),c(U,"id","bert"),c(U,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(U,"href","#bert"),c(D,"class","relative group"),c(K,"id","xlmroberta"),c(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(K,"href","#xlmroberta"),c(X,"class","relative group"),c(V,"id","m2m100"),c(V,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(V,"href","#m2m100"),c(A,"class","relative group"),c(ee,"id","mbart"),c(ee,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),c(ee,"href","#mbart"),c(N,"class","relative 
group")},m(e,n){o(document.head,L),m(e,fa,n),m(e,O,n),o(O,S),o(S,po),g(ne,po,null),o(O,Lt),o(O,co),o(co,Ot),m(e,ga,n),g(me,e,n),m(e,ha,n),m(e,y,n),o(y,Ct),o(y,uo),o(uo,Tt),o(y,Pt),o(y,de),o(de,Dt),o(y,Xt),m(e,_a,n),m(e,C,n),o(C,I),o(I,fo),g(pe,fo,null),o(C,At),o(C,go),o(go,Nt),m(e,va,n),m(e,Le,n),o(Le,St),m(e,ba,n),m(e,T,n),o(T,B),o(B,ho),g(ce,ho,null),o(T,It),o(T,_o),o(_o,Bt),m(e,Ea,n),m(e,Oe,n),o(Oe,Rt),m(e,ka,n),m(e,b,n),o(b,Ce),o(Ce,vo),o(vo,Ft),o(Ce,Ht),o(b,Gt),o(b,Te),o(Te,bo),o(bo,Wt),o(Te,Ut),o(b,Jt),o(b,Pe),o(Pe,Eo),o(Eo,Kt),o(Pe,Qt),o(b,Vt),o(b,De),o(De,ko),o(ko,Yt),o(De,Zt),o(b,es),o(b,Xe),o(Xe,$o),o($o,os),o(Xe,as),o(b,ts),o(b,Ae),o(Ae,Mo),o(Mo,ss),o(Ae,ls),o(b,is),o(b,Ne),o(Ne,xo),o(xo,rs),o(Ne,ns),m(e,$a,n),m(e,$,n),o($,ms),o($,wo),o(wo,ds),o($,ps),o($,zo),o(zo,cs),o($,us),o($,yo),o(yo,fs),o($,gs),m(e,Ma,n),m(e,R,n),o(R,hs),o(R,qo),o(qo,_s),o(R,vs),m(e,xa,n),g(ue,e,n),m(e,wa,n),m(e,F,n),o(F,bs),o(F,jo),o(jo,Es),o(F,ks),m(e,za,n),g(fe,e,n),m(e,ya,n),m(e,Se,n),o(Se,$s),m(e,qa,n),g(ge,e,n),m(e,ja,n),m(e,M,n),o(M,Ms),o(M,Lo),o(Lo,xs),o(M,ws),o(M,Oo),o(Oo,zs),o(M,ys),o(M,Co),o(Co,qs),o(M,js),m(e,La,n),g(he,e,n),m(e,Oa,n),m(e,H,n),o(H,Ls),o(H,To),o(To,Os),o(H,Cs),m(e,Ca,n),g(_e,e,n),m(e,Ta,n),m(e,q,n),o(q,Ts),o(q,ve),o(ve,Ps),o(q,Ds),o(q,Po),o(Po,Xs),o(q,As),m(e,Pa,n),m(e,P,n),o(P,G),o(G,Do),g(be,Do,null),o(P,Ns),o(P,Xo),o(Xo,Ss),m(e,Da,n),m(e,Ie,n),o(Ie,Is),m(e,Xa,n),m(e,W,n),o(W,Be),o(Be,Ao),o(Ao,Bs),o(Be,Rs),o(W,Fs),o(W,Re),o(Re,No),o(No,Hs),o(Re,Gs),m(e,Aa,n),m(e,Fe,n),o(Fe,Ws),m(e,Na,n),m(e,D,n),o(D,U),o(U,So),g(Ee,So,null),o(D,Us),o(D,Io),o(Io,Js),m(e,Sa,n),m(e,He,n),o(He,Ks),m(e,Ia,n),m(e,J,n),o(J,Ge),o(Ge,Bo),o(Bo,Qs),o(Ge,Vs),o(J,Ys),o(J,We),o(We,Ro),o(Ro,Zs),o(We,el),m(e,Ba,n),m(e,Ue,n),o(Ue,ol),m(e,Ra,n),m(e,X,n),o(X,K),o(K,Fo),g(ke,Fo,null),o(X,al),o(X,Ho),o(Ho,tl),m(e,Fa,n),m(e,Je,n),o(Je,sl),m(e,Ha,n),m(e,Q,n),o(Q,Ke),o(Ke,Go),o(Go,ll),o(Ke,il),o(Q,rl),o(Q,Qe),o(Qe,Wo),o(Wo,nl),o(Qe,ml),m(e,Ga,n),m(e,Ve,n),o(Ve,dl),m(e,Wa,n),m(e,A,n),o(A,V),o(V
,Uo),g($e,Uo,null),o(A,pl),o(A,Jo),o(Jo,cl),m(e,Ua,n),m(e,Ye,n),o(Ye,ul),m(e,Ja,n),m(e,Y,n),o(Y,Ze),o(Ze,Ko),o(Ko,fl),o(Ze,gl),o(Y,hl),o(Y,eo),o(eo,Qo),o(Qo,_l),o(eo,vl),m(e,Ka,n),m(e,Z,n),o(Z,bl),o(Z,Vo),o(Vo,El),o(Z,kl),m(e,Qa,n),g(Me,e,n),m(e,Va,n),m(e,oo,n),o(oo,$l),m(e,Ya,n),g(xe,e,n),m(e,Za,n),m(e,x,n),o(x,Ml),o(x,Yo),o(Yo,xl),o(x,wl),o(x,Zo),o(Zo,zl),o(x,yl),o(x,ea),o(ea,ql),o(x,jl),m(e,et,n),g(we,e,n),m(e,ot,n),m(e,N,n),o(N,ee),o(ee,oa),g(ze,oa,null),o(N,Ll),o(N,aa),o(aa,Ol),m(e,at,n),m(e,ao,n),o(ao,Cl),m(e,tt,n),m(e,E,n),o(E,to),o(to,ta),o(ta,Tl),o(to,Pl),o(E,Dl),o(E,so),o(so,sa),o(sa,Xl),o(so,Al),o(E,Nl),o(E,lo),o(lo,la),o(la,Sl),o(lo,Il),o(E,Bl),o(E,io),o(io,ia),o(ia,Rl),o(io,Fl),o(E,Hl),o(E,ra),o(ra,na),o(na,Gl),m(e,st,n),m(e,oe,n),o(oe,Wl),o(oe,ma),o(ma,Ul),o(oe,Jl),m(e,lt,n),g(ye,e,n),m(e,it,n),m(e,ro,n),o(ro,Kl),m(e,rt,n),g(qe,e,n),m(e,nt,n),m(e,w,n),o(w,Ql),o(w,da),o(da,Vl),o(w,Yl),o(w,pa),o(pa,Zl),o(w,ei),o(w,ca),o(ca,oi),o(w,ai),m(e,mt,n),g(je,e,n),m(e,dt,n),m(e,ae,n),o(ae,ti),o(ae,ua),o(ua,si),o(ae,li),pt=!0},p:an,i(e){pt||(h(ne.$$.fragment,e),h(me.$$.fragment,e),h(pe.$$.fragment,e),h(ce.$$.fragment,e),h(ue.$$.fragment,e),h(fe.$$.fragment,e),h(ge.$$.fragment,e),h(he.$$.fragment,e),h(_e.$$.fragment,e),h(be.$$.fragment,e),h(Ee.$$.fragment,e),h(ke.$$.fragment,e),h($e.$$.fragment,e),h(Me.$$.fragment,e),h(xe.$$.fragment,e),h(we.$$.fragment,e),h(ze.$$.fragment,e),h(ye.$$.fragment,e),h(qe.$$.fragment,e),h(je.$$.fragment,e),pt=!0)},o(e){_(ne.$$.fragment,e),_(me.$$.fragment,e),_(pe.$$.fragment,e),_(ce.$$.fragment,e),_(ue.$$.fragment,e),_(fe.$$.fragment,e),_(ge.$$.fragment,e),_(he.$$.fragment,e),_(_e.$$.fragment,e),_(be.$$.fragment,e),_(Ee.$$.fragment,e),_(ke.$$.fragment,e),_($e.$$.fragment,e),_(Me.$$.fragment,e),_(xe.$$.fragment,e),_(we.$$.fragment,e),_(ze.$$.fragment,e),_(ye.$$.fragment,e),_(qe.$$.fragment,e),_(je.$$.fragment,e),pt=!1},d(e){a(L),e&&a(fa),e&&a(O),v(ne),e&&a(ga),v(me,e),e&&a(ha),e&&a(y),e&&a(_a),e&&a(C),v(pe),e&&a(va),e&&a(Le),e&&a(ba),e&&a
(T),v(ce),e&&a(Ea),e&&a(Oe),e&&a(ka),e&&a(b),e&&a($a),e&&a($),e&&a(Ma),e&&a(R),e&&a(xa),v(ue,e),e&&a(wa),e&&a(F),e&&a(za),v(fe,e),e&&a(ya),e&&a(Se),e&&a(qa),v(ge,e),e&&a(ja),e&&a(M),e&&a(La),v(he,e),e&&a(Oa),e&&a(H),e&&a(Ca),v(_e,e),e&&a(Ta),e&&a(q),e&&a(Pa),e&&a(P),v(be),e&&a(Da),e&&a(Ie),e&&a(Xa),e&&a(W),e&&a(Aa),e&&a(Fe),e&&a(Na),e&&a(D),v(Ee),e&&a(Sa),e&&a(He),e&&a(Ia),e&&a(J),e&&a(Ba),e&&a(Ue),e&&a(Ra),e&&a(X),v(ke),e&&a(Fa),e&&a(Je),e&&a(Ha),e&&a(Q),e&&a(Ga),e&&a(Ve),e&&a(Wa),e&&a(A),v($e),e&&a(Ua),e&&a(Ye),e&&a(Ja),e&&a(Y),e&&a(Ka),e&&a(Z),e&&a(Qa),v(Me,e),e&&a(Va),e&&a(oo),e&&a(Ya),v(xe,e),e&&a(Za),e&&a(x),e&&a(et),v(we,e),e&&a(ot),e&&a(N),v(ze),e&&a(at),e&&a(ao),e&&a(tt),e&&a(E),e&&a(st),e&&a(oe),e&&a(lt),v(ye,e),e&&a(it),e&&a(ro),e&&a(rt),v(qe,e),e&&a(nt),e&&a(w),e&&a(mt),v(je,e),e&&a(dt),e&&a(ae)}}}const rn={local:"modelos-multilingusticos-para-inferncia",sections:[{local:"xlm",sections:[{local:"xlm-com-language-embeddings",title:"XLM com language embeddings"},{local:"xlm-sem-language-embeddings",title:"XLM sem language embeddings"}],title:"XLM"},{local:"bert",title:"BERT"},{local:"xlmroberta",title:"XLM-RoBERTa"},{local:"m2m100",title:"M2M100"},{local:"mbart",title:"MBart"}],title:"Modelos multilingu\xEDsticos para infer\xEAncia"};function nn(wi){return tn(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class un extends Yr{constructor(L){super();Zr(this,L,nn,ln,en,{})}}export{un as default,rn as metadata};
487
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/tasks/sequence_classification.mdx-hf-doc-builder.js
import{S as Ba,i as Ra,s as La,e as i,k as $,w as q,t as r,M as Wa,c as p,d as a,m as g,a as c,x as C,h as n,b as w,G as t,g as m,y as T,q as D,o as y,B as P,v as Ua,L as Na}from"../../chunks/vendor-hf-doc-builder.js";import{T as ut}from"../../chunks/Tip-hf-doc-builder.js";import{Y as Va}from"../../chunks/Youtube-hf-doc-builder.js";import{I as St}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as le}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as Ia,M as Ot}from"../../chunks/Markdown-hf-doc-builder.js";function Ga(S){let s,f,o,u,b;return{c(){s=i("p"),f=r("Consulte a "),o=i("a"),u=r("p\xE1gina de tarefas de classifica\xE7\xE3o de texto"),b=r(" para obter mais informa\xE7\xF5es sobre outras formas de classifica\xE7\xE3o de texto e seus modelos, conjuntos de dados e m\xE9tricas associados."),this.h()},l(_){s=p(_,"P",{});var v=c(s);f=n(v,"Consulte a "),o=p(v,"A",{href:!0,rel:!0});var z=c(o);u=n(z,"p\xE1gina de tarefas de classifica\xE7\xE3o de texto"),z.forEach(a),b=n(v," para obter mais informa\xE7\xF5es sobre outras formas de classifica\xE7\xE3o de texto e seus modelos, conjuntos de dados e m\xE9tricas associados."),v.forEach(a),this.h()},h(){w(o,"href","https://huggingface.co/tasks/text-classification"),w(o,"rel","nofollow")},m(_,v){m(_,s,v),t(s,f),t(s,o),t(o,u),t(s,b)},d(_){_&&a(s)}}}function Ha(S){let s,f;return s=new le({props:{code:`from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorWithPadding(tokenizer=tokenizer)`}}),{c(){q(s.$$.fragment)},l(o){C(s.$$.fragment,o)},m(o,u){T(s,o,u),f=!0},p:Na,i(o){f||(D(s.$$.fragment,o),f=!0)},o(o){y(s.$$.fragment,o),f=!1},d(o){P(s,o)}}}function Ka(S){let s,f;return s=new 
Ot({props:{$$slots:{default:[Ha]},$$scope:{ctx:S}}}),{c(){q(s.$$.fragment)},l(o){C(s.$$.fragment,o)},m(o,u){T(s,o,u),f=!0},p(o,u){const b={};u&2&&(b.$$scope={dirty:u,ctx:o}),s.$set(b)},i(o){f||(D(s.$$.fragment,o),f=!0)},o(o){y(s.$$.fragment,o),f=!1},d(o){P(s,o)}}}function Ja(S){let s,f;return s=new le({props:{code:`from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),{c(){q(s.$$.fragment)},l(o){C(s.$$.fragment,o)},m(o,u){T(s,o,u),f=!0},p:Na,i(o){f||(D(s.$$.fragment,o),f=!0)},o(o){y(s.$$.fragment,o),f=!1},d(o){P(s,o)}}}function Ya(S){let s,f;return s=new Ot({props:{$$slots:{default:[Ja]},$$scope:{ctx:S}}}),{c(){q(s.$$.fragment)},l(o){C(s.$$.fragment,o)},m(o,u){T(s,o,u),f=!0},p(o,u){const b={};u&2&&(b.$$scope={dirty:u,ctx:o}),s.$set(b)},i(o){f||(D(s.$$.fragment,o),f=!0)},o(o){y(s.$$.fragment,o),f=!1},d(o){P(s,o)}}}function Qa(S){let s,f,o,u,b,_,v,z;return{c(){s=i("p"),f=r("Se voc\xEA n\xE3o estiver familiarizado com o fine-tuning de um modelo com o "),o=i("code"),u=r("Trainer"),b=r(", d\xEA uma olhada no tutorial b\xE1sico "),_=i("a"),v=r("aqui"),z=r("!"),this.h()},l(x){s=p(x,"P",{});var j=c(s);f=n(j,"Se voc\xEA n\xE3o estiver familiarizado com o fine-tuning de um modelo com o "),o=p(j,"CODE",{});var A=c(o);u=n(A,"Trainer"),A.forEach(a),b=n(j,", d\xEA uma olhada no tutorial b\xE1sico "),_=p(j,"A",{href:!0});var I=c(_);v=n(I,"aqui"),I.forEach(a),z=n(j,"!"),j.forEach(a),this.h()},h(){w(_,"href","../training#finetune-with-trainer")},m(x,j){m(x,s,j),t(s,f),t(s,o),t(o,u),t(s,b),t(s,_),t(_,v),t(s,z)},d(x){x&&a(s)}}}function Xa(S){let 
s,f,o,u,b,_,v,z;return{c(){s=i("p"),f=r("O "),o=i("code"),u=r("Trainer"),b=r(" aplicar\xE1 o preenchimento din\xE2mico por padr\xE3o quando voc\xEA definir o argumento "),_=i("code"),v=r("tokenizer"),z=r(" dele. Nesse caso, voc\xEA n\xE3o precisa especificar um data collator explicitamente.")},l(x){s=p(x,"P",{});var j=c(s);f=n(j,"O "),o=p(j,"CODE",{});var A=c(o);u=n(A,"Trainer"),A.forEach(a),b=n(j," aplicar\xE1 o preenchimento din\xE2mico por padr\xE3o quando voc\xEA definir o argumento "),_=p(j,"CODE",{});var I=c(_);v=n(I,"tokenizer"),I.forEach(a),z=n(j," dele. Nesse caso, voc\xEA n\xE3o precisa especificar um data collator explicitamente."),j.forEach(a)},m(x,j){m(x,s,j),t(s,f),t(s,o),t(o,u),t(s,b),t(s,_),t(_,v),t(s,z)},d(x){x&&a(s)}}}function Za(S){let s,f,o,u,b,_,v,z,x,j,A,I,Y,M,H,O,se,U,be,fe,F,ue,V,he,G,_e,N,K,R,Q,X,oe,L,ee,W,$e;return v=new le({props:{code:`from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)`}}),x=new ut({props:{$$slots:{default:[Qa]},$$scope:{ctx:S}}}),L=new le({props:{code:`training_args = TrainingArguments( output_dir="./results", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=5, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_imdb["train"], eval_dataset=tokenized_imdb["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span 
class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">5</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_imdb[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),W=new ut({props:{$$slots:{default:[Xa]},$$scope:{ctx:S}}}),{c(){s=i("p"),f=r("Carregue o DistilBERT com "),o=i("code"),u=r("AutoModelForSequenceClassification"),b=r(" junto com o n\xFAmero de r\xF3tulos esperados:"),_=$(),q(v.$$.fragment),z=$(),q(x.$$.fragment),j=$(),A=i("p"),I=r("Nesse ponto, restam apenas tr\xEAs passos:"),Y=$(),M=i("ol"),H=i("li"),O=r("Definir seus hiperpar\xE2metros de treinamento em "),se=i("code"),U=r("TrainingArguments"),be=r("."),fe=$(),F=i("li"),ue=r("Passar os argumentos de treinamento para o "),V=i("code"),he=r("Trainer"),G=r(" junto com o modelo, conjunto de dados, tokenizador e o data collator."),_e=$(),N=i("li"),K=r("Chamar a fun\xE7\xE3o "),R=i("code"),Q=r("train()"),X=r(" para executar o fine-tuning do seu modelo."),oe=$(),q(L.$$.fragment),ee=$(),q(W.$$.fragment)},l(d){s=p(d,"P",{});var k=c(s);f=n(k,"Carregue o DistilBERT com "),o=p(k,"CODE",{});var te=c(o);u=n(te,"AutoModelForSequenceClassification"),te.forEach(a),b=n(k," junto com o n\xFAmero de r\xF3tulos esperados:"),k.forEach(a),_=g(d),C(v.$$.fragment,d),z=g(d),C(x.$$.fragment,d),j=g(d),A=p(d,"P",{});var B=c(A);I=n(B,"Nesse ponto, restam apenas tr\xEAs passos:"),B.forEach(a),Y=g(d),M=p(d,"OL",{});var re=c(M);H=p(re,"LI",{});var J=c(H);O=n(J,"Definir seus hiperpar\xE2metros de treinamento em "),se=p(J,"CODE",{});var Z=c(se);U=n(Z,"TrainingArguments"),Z.forEach(a),be=n(J,"."),J.forEach(a),fe=g(re),F=p(re,"LI",{});var ie=c(F);ue=n(ie,"Passar os argumentos de treinamento para o "),V=p(ie,"CODE",{});var pe=c(V);he=n(pe,"Trainer"),pe.forEach(a),G=n(ie," junto com o modelo, conjunto de dados, tokenizador e o data collator."),ie.forEach(a),_e=g(re),N=p(re,"LI",{});var ce=c(N);K=n(ce,"Chamar a fun\xE7\xE3o "),R=p(ce,"CODE",{});var ae=c(R);Q=n(ae,"train()"),ae.forEach(a),X=n(ce," para executar o fine-tuning do seu 
modelo."),ce.forEach(a),re.forEach(a),oe=g(d),C(L.$$.fragment,d),ee=g(d),C(W.$$.fragment,d)},m(d,k){m(d,s,k),t(s,f),t(s,o),t(o,u),t(s,b),m(d,_,k),T(v,d,k),m(d,z,k),T(x,d,k),m(d,j,k),m(d,A,k),t(A,I),m(d,Y,k),m(d,M,k),t(M,H),t(H,O),t(H,se),t(se,U),t(H,be),t(M,fe),t(M,F),t(F,ue),t(F,V),t(V,he),t(F,G),t(M,_e),t(M,N),t(N,K),t(N,R),t(R,Q),t(N,X),m(d,oe,k),T(L,d,k),m(d,ee,k),T(W,d,k),$e=!0},p(d,k){const te={};k&2&&(te.$$scope={dirty:k,ctx:d}),x.$set(te);const B={};k&2&&(B.$$scope={dirty:k,ctx:d}),W.$set(B)},i(d){$e||(D(v.$$.fragment,d),D(x.$$.fragment,d),D(L.$$.fragment,d),D(W.$$.fragment,d),$e=!0)},o(d){y(v.$$.fragment,d),y(x.$$.fragment,d),y(L.$$.fragment,d),y(W.$$.fragment,d),$e=!1},d(d){d&&a(s),d&&a(_),P(v,d),d&&a(z),P(x,d),d&&a(j),d&&a(A),d&&a(Y),d&&a(M),d&&a(oe),P(L,d),d&&a(ee),P(W,d)}}}function es(S){let s,f;return s=new Ot({props:{$$slots:{default:[Za]},$$scope:{ctx:S}}}),{c(){q(s.$$.fragment)},l(o){C(s.$$.fragment,o)},m(o,u){T(s,o,u),f=!0},p(o,u){const b={};u&2&&(b.$$scope={dirty:u,ctx:o}),s.$set(b)},i(o){f||(D(s.$$.fragment,o),f=!0)},o(o){y(s.$$.fragment,o),f=!1},d(o){P(s,o)}}}function ts(S){let s,f,o,u,b;return{c(){s=i("p"),f=r("Se voc\xEA n\xE3o estiver familiarizado com o fine-tuning de um modelo com o Keras, d\xEA uma olhada no tutorial b\xE1sico "),o=i("a"),u=r("aqui"),b=r("!"),this.h()},l(_){s=p(_,"P",{});var v=c(s);f=n(v,"Se voc\xEA n\xE3o estiver familiarizado com o fine-tuning de um modelo com o Keras, d\xEA uma olhada no tutorial b\xE1sico "),o=p(v,"A",{href:!0});var z=c(o);u=n(z,"aqui"),z.forEach(a),b=n(v,"!"),v.forEach(a),this.h()},h(){w(o,"href","training#finetune-with-keras")},m(_,v){m(_,s,v),t(s,f),t(s,o),t(o,u),t(s,b)},d(_){_&&a(s)}}}function as(S){let s,f,o,u,b,_,v,z,x,j,A,I,Y,M,H,O,se,U,be,fe,F,ue,V,he,G,_e,N,K,R,Q,X,oe,L,ee,W,$e,d,k,te,B,re,J,Z,ie,pe,ce,ae,me;return M=new le({props:{code:`tf_train_set = tokenized_imdb["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "label"], shuffle=True, batch_size=16, 
collate_fn=data_collator, ) tf_validation_set = tokenized_imdb["test"].to_tf_dataset( columns=["attention_mask", "input_ids", "label"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = tokenized_imdb[<span class="hljs-string">&quot;test&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)`}}),O=new ut({props:{$$slots:{default:[ts]},$$scope:{ctx:S}}}),F=new le({props:{code:`from transformers import create_optimizer import tensorflow as tf batch_size = 16 num_epochs = 5 batches_per_epoch = len(tokenized_imdb["train"]) // batch_size total_train_steps = int(batches_per_epoch * num_epochs) optimizer, schedule = create_optimizer(init_lr=2e-5, num_warmup_steps=0, num_train_steps=total_train_steps)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">5</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batches_per_epoch = <span class="hljs-built_in">len</span>(tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = <span class="hljs-built_in">int</span>(batches_per_epoch * num_epochs) <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer(init_lr=<span class="hljs-number">2e-5</span>, num_warmup_steps=<span class="hljs-number">0</span>, num_train_steps=total_train_steps)`}}),R=new le({props:{code:`from transformers import TFAutoModelForSequenceClassification model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, 
num_labels=<span class="hljs-number">2</span>)`}}),k=new le({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),ae=new le({props:{code:"model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)'}}),{c(){s=i("p"),f=r("Para executar o fine-tuning de um modelo no TensorFlow, comece convertendo seu conjunto de dados para o formato "),o=i("code"),u=r("tf.data.Dataset"),b=r(" com "),_=i("a"),v=i("code"),z=r("to_tf_dataset"),x=r(". Nessa execu\xE7\xE3o voc\xEA dever\xE1 especificar as entradas e r\xF3tulos (no par\xE2metro "),j=i("code"),A=r("columns"),I=r("), se deseja embaralhar o conjunto de dados, o tamanho do batch e o data collator:"),Y=$(),q(M.$$.fragment),H=$(),q(O.$$.fragment),se=$(),U=i("p"),be=r("Configure o otimizador e alguns hiperpar\xE2metros de treinamento:"),fe=$(),q(F.$$.fragment),ue=$(),V=i("p"),he=r("Carregue o DistilBERT com "),G=i("code"),_e=r("TFAutoModelForSequenceClassification"),N=r(" junto com o n\xFAmero de r\xF3tulos esperados:"),K=$(),q(R.$$.fragment),Q=$(),X=i("p"),oe=r("Configure o modelo para treinamento com o m\xE9todo "),L=i("a"),ee=i("code"),W=r("compile"),$e=r(":"),d=$(),q(k.$$.fragment),te=$(),B=i("p"),re=r("Chame o m\xE9todo "),J=i("a"),Z=i("code"),ie=r("fit"),pe=r(" para executar o fine-tuning do modelo:"),ce=$(),q(ae.$$.fragment),this.h()},l(l){s=p(l,"P",{});var E=c(s);f=n(E,"Para executar o fine-tuning de um modelo no TensorFlow, comece convertendo seu conjunto de dados para o formato "),o=p(E,"CODE",{});var Ee=c(o);u=n(Ee,"tf.data.Dataset"),Ee.forEach(a),b=n(E," com 
"),_=p(E,"A",{href:!0,rel:!0});var Ue=c(_);v=p(Ue,"CODE",{});var Ve=c(v);z=n(Ve,"to_tf_dataset"),Ve.forEach(a),Ue.forEach(a),x=n(E,". Nessa execu\xE7\xE3o voc\xEA dever\xE1 especificar as entradas e r\xF3tulos (no par\xE2metro "),j=p(E,"CODE",{});var de=c(j);A=n(de,"columns"),de.forEach(a),I=n(E,"), se deseja embaralhar o conjunto de dados, o tamanho do batch e o data collator:"),E.forEach(a),Y=g(l),C(M.$$.fragment,l),H=g(l),C(O.$$.fragment,l),se=g(l),U=p(l,"P",{});var ze=c(U);be=n(ze,"Configure o otimizador e alguns hiperpar\xE2metros de treinamento:"),ze.forEach(a),fe=g(l),C(F.$$.fragment,l),ue=g(l),V=p(l,"P",{});var qe=c(V);he=n(qe,"Carregue o DistilBERT com "),G=p(qe,"CODE",{});var Ge=c(G);_e=n(Ge,"TFAutoModelForSequenceClassification"),Ge.forEach(a),N=n(qe," junto com o n\xFAmero de r\xF3tulos esperados:"),qe.forEach(a),K=g(l),C(R.$$.fragment,l),Q=g(l),X=p(l,"P",{});var ve=c(X);oe=n(ve,"Configure o modelo para treinamento com o m\xE9todo "),L=p(ve,"A",{href:!0,rel:!0});var He=c(L);ee=p(He,"CODE",{});var Ke=c(ee);W=n(Ke,"compile"),Ke.forEach(a),He.forEach(a),$e=n(ve,":"),ve.forEach(a),d=g(l),C(k.$$.fragment,l),te=g(l),B=p(l,"P",{});var je=c(B);re=n(je,"Chame o m\xE9todo "),J=p(je,"A",{href:!0,rel:!0});var Je=c(J);Z=p(Je,"CODE",{});var Ye=c(Z);ie=n(Ye,"fit"),Ye.forEach(a),Je.forEach(a),pe=n(je," para executar o fine-tuning do 
modelo:"),je.forEach(a),ce=g(l),C(ae.$$.fragment,l),this.h()},h(){w(_,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),w(_,"rel","nofollow"),w(L,"href","https://keras.io/api/models/model_training_apis/#compile-method"),w(L,"rel","nofollow"),w(J,"href","https://keras.io/api/models/model_training_apis/#fit-method"),w(J,"rel","nofollow")},m(l,E){m(l,s,E),t(s,f),t(s,o),t(o,u),t(s,b),t(s,_),t(_,v),t(v,z),t(s,x),t(s,j),t(j,A),t(s,I),m(l,Y,E),T(M,l,E),m(l,H,E),T(O,l,E),m(l,se,E),m(l,U,E),t(U,be),m(l,fe,E),T(F,l,E),m(l,ue,E),m(l,V,E),t(V,he),t(V,G),t(G,_e),t(V,N),m(l,K,E),T(R,l,E),m(l,Q,E),m(l,X,E),t(X,oe),t(X,L),t(L,ee),t(ee,W),t(X,$e),m(l,d,E),T(k,l,E),m(l,te,E),m(l,B,E),t(B,re),t(B,J),t(J,Z),t(Z,ie),t(B,pe),m(l,ce,E),T(ae,l,E),me=!0},p(l,E){const Ee={};E&2&&(Ee.$$scope={dirty:E,ctx:l}),O.$set(Ee)},i(l){me||(D(M.$$.fragment,l),D(O.$$.fragment,l),D(F.$$.fragment,l),D(R.$$.fragment,l),D(k.$$.fragment,l),D(ae.$$.fragment,l),me=!0)},o(l){y(M.$$.fragment,l),y(O.$$.fragment,l),y(F.$$.fragment,l),y(R.$$.fragment,l),y(k.$$.fragment,l),y(ae.$$.fragment,l),me=!1},d(l){l&&a(s),l&&a(Y),P(M,l),l&&a(H),P(O,l),l&&a(se),l&&a(U),l&&a(fe),P(F,l),l&&a(ue),l&&a(V),l&&a(K),P(R,l),l&&a(Q),l&&a(X),l&&a(d),P(k,l),l&&a(te),l&&a(B),l&&a(ce),P(ae,l)}}}function ss(S){let s,f;return s=new Ot({props:{$$slots:{default:[as]},$$scope:{ctx:S}}}),{c(){q(s.$$.fragment)},l(o){C(s.$$.fragment,o)},m(o,u){T(s,o,u),f=!0},p(o,u){const b={};u&2&&(b.$$scope={dirty:u,ctx:o}),s.$set(b)},i(o){f||(D(s.$$.fragment,o),f=!0)},o(o){y(s.$$.fragment,o),f=!1},d(o){P(s,o)}}}function os(S){let s,f,o,u,b,_,v,z;return{c(){s=i("p"),f=r("Para obter um exemplo mais aprofundado de como executar o fine-tuning de um modelo para classifica\xE7\xE3o de texto, d\xEA uma olhada nesse "),o=i("a"),u=r("notebook utilizando PyTorch"),b=r(" ou nesse "),_=i("a"),v=r("notebook utilizando TensorFlow"),z=r("."),this.h()},l(x){s=p(x,"P",{});var j=c(s);f=n(j,"Para obter um exemplo mais 
aprofundado de como executar o fine-tuning de um modelo para classifica\xE7\xE3o de texto, d\xEA uma olhada nesse "),o=p(j,"A",{href:!0,rel:!0});var A=c(o);u=n(A,"notebook utilizando PyTorch"),A.forEach(a),b=n(j," ou nesse "),_=p(j,"A",{href:!0,rel:!0});var I=c(_);v=n(I,"notebook utilizando TensorFlow"),I.forEach(a),z=n(j,"."),j.forEach(a),this.h()},h(){w(o,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb"),w(o,"rel","nofollow"),w(_,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb"),w(_,"rel","nofollow")},m(x,j){m(x,s,j),t(s,f),t(s,o),t(o,u),t(s,b),t(s,_),t(_,v),t(s,z)},d(x){x&&a(s)}}}function rs(S){let s,f,o,u,b,_,v,z,x,j,A,I,Y,M,H,O,se,U,be,fe,F,ue,V,he,G,_e,N,K,R,Q,X,oe,L,ee,W,$e,d,k,te,B,re,J,Z,ie,pe,ce,ae,me,l,E,Ee,Ue,Ve,de,ze,qe,Ge,ve,He,Ke,je,Je,Ye,ht,we,Ce,Xe,Fe,Ft,Ze,Mt,_t,Te,It,et,Nt,Bt,$t,Me,gt,De,Rt,tt,Lt,Wt,bt,Ie,vt,ge,Ut,Ne,at,Vt,Gt,st,Ht,Kt,ot,Jt,Yt,jt,Be,kt,ne,Qt,rt,Xt,Zt,nt,ea,ta,lt,aa,sa,it,oa,ra,Et,ye,wt,xe,Pe,pt,Re,na,ct,la,xt,Ae,zt,Se,qt;return _=new St({}),A=new Va({props:{id:"leNG9fN9FQU"}}),G=new ut({props:{$$slots:{default:[Ga]},$$scope:{ctx:S}}}),Q=new St({}),k=new le({props:{code:`from datasets import load_dataset imdb = load_dataset("imdb")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>imdb = load_dataset(<span class="hljs-string">&quot;imdb&quot;</span>)`}}),Z=new le({props:{code:'imdb["test"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>imdb[<span class="hljs-string">&quot;test&quot;</span>][<span class="hljs-number">0</span>] { <span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;text&quot;</span>: <span class="hljs-string">&quot;I love sci-fi and am willing to 
put up with a lot. Sci-fi movies/TV are usually underfunded, under-appreciated and misunderstood. I tried to like this, I really did, but it is to good TV sci-fi as Babylon 5 is to Star Trek (the original). Silly prosthetics, cheap cardboard sets, stilted dialogues, CG that doesn&#x27;t match the background, and painfully one-dimensional characters cannot be overcome with a &#x27;sci-fi&#x27; setting. (I&#x27;m sure there are those of you out there who think Babylon 5 is good sci-fi TV. It&#x27;s not. It&#x27;s clich\xE9d and uninspiring.) While US viewers might like emotion and character development, sci-fi is a genre that does not take itself seriously (cf. Star Trek). It may treat important issues, yet not as a serious philosophy. It&#x27;s really difficult to care about the characters here as they are not simply foolish, just missing a spark of life. Their actions and reactions are wooden and predictable, often painful to watch. The makers of Earth KNOW it&#x27;s rubbish as they have to always say \\&quot;Gene Roddenberry&#x27;s Earth...\\&quot; otherwise people would not continue watching. Roddenberry&#x27;s ashes must be turning in their orbit as this dull, cheap, poorly edited (watching it without advert breaks really brings this home) trudging Trabant of a show lumbers into space. Spoiler. So, kill off a main character. And then bring him back as another actor. Jeeez! 
Dallas all over again.&quot;</span>, }`}}),Fe=new St({}),Me=new le({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),Ie=new le({props:{code:`def preprocess_function(examples): return tokenizer(examples["text"], truncation=True)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> tokenizer(examples[<span class="hljs-string">&quot;text&quot;</span>], truncation=<span class="hljs-literal">True</span>)`}}),Be=new le({props:{code:"tokenized_imdb = imdb.map(preprocess_function, batched=True)",highlighted:'tokenized_imdb = imdb.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)'}}),ye=new Ia({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Ya],pytorch:[Ka]},$$scope:{ctx:S}}}),Re=new St({}),Ae=new Ia({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[ss],pytorch:[es]},$$scope:{ctx:S}}}),Se=new ut({props:{$$slots:{default:[os]},$$scope:{ctx:S}}}),{c(){s=i("meta"),f=$(),o=i("h1"),u=i("a"),b=i("span"),q(_.$$.fragment),v=$(),z=i("span"),x=r("Classifica\xE7\xE3o de texto"),j=$(),q(A.$$.fragment),I=$(),Y=i("p"),M=r("A classifica\xE7\xE3o de texto \xE9 uma tarefa comum de NLP que atribui um r\xF3tulo ou classe a um texto. Existem muitas aplica\xE7\xF5es pr\xE1ticas de classifica\xE7\xE3o de texto amplamente utilizadas em produ\xE7\xE3o por algumas das maiores empresas da atualidade. 
Uma das formas mais populares de classifica\xE7\xE3o de texto \xE9 a an\xE1lise de sentimento, que atribui um r\xF3tulo como positivo, negativo ou neutro a um texto."),H=$(),O=i("p"),se=r("Este guia mostrar\xE1 como realizar o fine-tuning do "),U=i("a"),be=r("DistilBERT"),fe=r(" no conjunto de dados "),F=i("a"),ue=r("IMDb"),V=r(" para determinar se a cr\xEDtica de filme \xE9 positiva ou negativa."),he=$(),q(G.$$.fragment),_e=$(),N=i("h2"),K=i("a"),R=i("span"),q(Q.$$.fragment),X=$(),oe=i("span"),L=r("Carregue o conjunto de dados IMDb"),ee=$(),W=i("p"),$e=r("Carregue o conjunto de dados IMDb utilizando a biblioteca \u{1F917} Datasets:"),d=$(),q(k.$$.fragment),te=$(),B=i("p"),re=r("Em seguida, d\xEA uma olhada em um exemplo:"),J=$(),q(Z.$$.fragment),ie=$(),pe=i("p"),ce=r("Existem dois campos neste dataset:"),ae=$(),me=i("ul"),l=i("li"),E=i("code"),Ee=r("text"),Ue=r(": uma string contendo o texto da cr\xEDtica do filme."),Ve=$(),de=i("li"),ze=i("code"),qe=r("label"),Ge=r(": um valor que pode ser "),ve=i("code"),He=r("0"),Ke=r(" para uma cr\xEDtica negativa ou "),je=i("code"),Je=r("1"),Ye=r(" para uma cr\xEDtica positiva."),ht=$(),we=i("h2"),Ce=i("a"),Xe=i("span"),q(Fe.$$.fragment),Ft=$(),Ze=i("span"),Mt=r("Pr\xE9-processamento dos dados"),_t=$(),Te=i("p"),It=r("Carregue o tokenizador do DistilBERT para processar o campo "),et=i("code"),Nt=r("text"),Bt=r(":"),$t=$(),q(Me.$$.fragment),gt=$(),De=i("p"),Rt=r("Crie uma fun\xE7\xE3o de pr\xE9-processamento para tokenizar o campo "),tt=i("code"),Lt=r("text"),Wt=r(" e truncar as sequ\xEAncias para que n\xE3o sejam maiores que o comprimento m\xE1ximo de entrada do DistilBERT:"),bt=$(),q(Ie.$$.fragment),vt=$(),ge=i("p"),Ut=r("Use a fun\xE7\xE3o "),Ne=i("a"),at=i("code"),Vt=r("map"),Gt=r(" do \u{1F917} Datasets para aplicar a fun\xE7\xE3o de pr\xE9-processamento em todo o conjunto de dados. 
Voc\xEA pode acelerar a fun\xE7\xE3o "),st=i("code"),Ht=r("map"),Kt=r(" definindo "),ot=i("code"),Jt=r("batched=True"),Yt=r(" para processar v\xE1rios elementos do conjunto de dados de uma s\xF3 vez:"),jt=$(),q(Be.$$.fragment),kt=$(),ne=i("p"),Qt=r("Use o "),rt=i("code"),Xt=r("DataCollatorWithPadding"),Zt=r(" para criar um batch de exemplos. Ele tamb\xE9m "),nt=i("em"),ea=r("preencher\xE1 dinamicamente"),ta=r(" seu texto at\xE9 o comprimento do elemento mais longo em seu batch, para que os exemplos do batch tenham um comprimento uniforme. Embora seja poss\xEDvel preencher seu texto com a fun\xE7\xE3o "),lt=i("code"),aa=r("tokenizer"),sa=r(" definindo "),it=i("code"),oa=r("padding=True"),ra=r(", o preenchimento din\xE2mico utilizando um data collator \xE9 mais eficiente."),Et=$(),q(ye.$$.fragment),wt=$(),xe=i("h2"),Pe=i("a"),pt=i("span"),q(Re.$$.fragment),na=$(),ct=i("span"),la=r("Train"),xt=$(),q(Ae.$$.fragment),zt=$(),q(Se.$$.fragment),this.h()},l(e){const h=Wa('[data-svelte="svelte-1phssyn"]',document.head);s=p(h,"META",{name:!0,content:!0}),h.forEach(a),f=g(e),o=p(e,"H1",{class:!0});var Le=c(o);u=p(Le,"A",{id:!0,class:!0,href:!0});var mt=c(u);b=p(mt,"SPAN",{});var dt=c(b);C(_.$$.fragment,dt),dt.forEach(a),mt.forEach(a),v=g(Le),z=p(Le,"SPAN",{});var ft=c(z);x=n(ft,"Classifica\xE7\xE3o de texto"),ft.forEach(a),Le.forEach(a),j=g(e),C(A.$$.fragment,e),I=g(e),Y=p(e,"P",{});var pa=c(Y);M=n(pa,"A classifica\xE7\xE3o de texto \xE9 uma tarefa comum de NLP que atribui um r\xF3tulo ou classe a um texto. Existem muitas aplica\xE7\xF5es pr\xE1ticas de classifica\xE7\xE3o de texto amplamente utilizadas em produ\xE7\xE3o por algumas das maiores empresas da atualidade. 
Uma das formas mais populares de classifica\xE7\xE3o de texto \xE9 a an\xE1lise de sentimento, que atribui um r\xF3tulo como positivo, negativo ou neutro a um texto."),pa.forEach(a),H=g(e),O=p(e,"P",{});var Qe=c(O);se=n(Qe,"Este guia mostrar\xE1 como realizar o fine-tuning do "),U=p(Qe,"A",{href:!0,rel:!0});var ca=c(U);be=n(ca,"DistilBERT"),ca.forEach(a),fe=n(Qe," no conjunto de dados "),F=p(Qe,"A",{href:!0,rel:!0});var ma=c(F);ue=n(ma,"IMDb"),ma.forEach(a),V=n(Qe," para determinar se a cr\xEDtica de filme \xE9 positiva ou negativa."),Qe.forEach(a),he=g(e),C(G.$$.fragment,e),_e=g(e),N=p(e,"H2",{class:!0});var Ct=c(N);K=p(Ct,"A",{id:!0,class:!0,href:!0});var da=c(K);R=p(da,"SPAN",{});var fa=c(R);C(Q.$$.fragment,fa),fa.forEach(a),da.forEach(a),X=g(Ct),oe=p(Ct,"SPAN",{});var ua=c(oe);L=n(ua,"Carregue o conjunto de dados IMDb"),ua.forEach(a),Ct.forEach(a),ee=g(e),W=p(e,"P",{});var ha=c(W);$e=n(ha,"Carregue o conjunto de dados IMDb utilizando a biblioteca \u{1F917} Datasets:"),ha.forEach(a),d=g(e),C(k.$$.fragment,e),te=g(e),B=p(e,"P",{});var _a=c(B);re=n(_a,"Em seguida, d\xEA uma olhada em um exemplo:"),_a.forEach(a),J=g(e),C(Z.$$.fragment,e),ie=g(e),pe=p(e,"P",{});var $a=c(pe);ce=n($a,"Existem dois campos neste dataset:"),$a.forEach(a),ae=g(e),me=p(e,"UL",{});var Tt=c(me);l=p(Tt,"LI",{});var ia=c(l);E=p(ia,"CODE",{});var ga=c(E);Ee=n(ga,"text"),ga.forEach(a),Ue=n(ia,": uma string contendo o texto da cr\xEDtica do filme."),ia.forEach(a),Ve=g(Tt),de=p(Tt,"LI",{});var We=c(de);ze=p(We,"CODE",{});var ba=c(ze);qe=n(ba,"label"),ba.forEach(a),Ge=n(We,": um valor que pode ser "),ve=p(We,"CODE",{});var va=c(ve);He=n(va,"0"),va.forEach(a),Ke=n(We," para uma cr\xEDtica negativa ou "),je=p(We,"CODE",{});var ja=c(je);Je=n(ja,"1"),ja.forEach(a),Ye=n(We," para uma cr\xEDtica positiva."),We.forEach(a),Tt.forEach(a),ht=g(e),we=p(e,"H2",{class:!0});var Dt=c(we);Ce=p(Dt,"A",{id:!0,class:!0,href:!0});var ka=c(Ce);Xe=p(ka,"SPAN",{});var 
Ea=c(Xe);C(Fe.$$.fragment,Ea),Ea.forEach(a),ka.forEach(a),Ft=g(Dt),Ze=p(Dt,"SPAN",{});var wa=c(Ze);Mt=n(wa,"Pr\xE9-processamento dos dados"),wa.forEach(a),Dt.forEach(a),_t=g(e),Te=p(e,"P",{});var yt=c(Te);It=n(yt,"Carregue o tokenizador do DistilBERT para processar o campo "),et=p(yt,"CODE",{});var xa=c(et);Nt=n(xa,"text"),xa.forEach(a),Bt=n(yt,":"),yt.forEach(a),$t=g(e),C(Me.$$.fragment,e),gt=g(e),De=p(e,"P",{});var Pt=c(De);Rt=n(Pt,"Crie uma fun\xE7\xE3o de pr\xE9-processamento para tokenizar o campo "),tt=p(Pt,"CODE",{});var za=c(tt);Lt=n(za,"text"),za.forEach(a),Wt=n(Pt," e truncar as sequ\xEAncias para que n\xE3o sejam maiores que o comprimento m\xE1ximo de entrada do DistilBERT:"),Pt.forEach(a),bt=g(e),C(Ie.$$.fragment,e),vt=g(e),ge=p(e,"P",{});var Oe=c(ge);Ut=n(Oe,"Use a fun\xE7\xE3o "),Ne=p(Oe,"A",{href:!0,rel:!0});var qa=c(Ne);at=p(qa,"CODE",{});var Ca=c(at);Vt=n(Ca,"map"),Ca.forEach(a),qa.forEach(a),Gt=n(Oe," do \u{1F917} Datasets para aplicar a fun\xE7\xE3o de pr\xE9-processamento em todo o conjunto de dados. Voc\xEA pode acelerar a fun\xE7\xE3o "),st=p(Oe,"CODE",{});var Ta=c(st);Ht=n(Ta,"map"),Ta.forEach(a),Kt=n(Oe," definindo "),ot=p(Oe,"CODE",{});var Da=c(ot);Jt=n(Da,"batched=True"),Da.forEach(a),Yt=n(Oe," para processar v\xE1rios elementos do conjunto de dados de uma s\xF3 vez:"),Oe.forEach(a),jt=g(e),C(Be.$$.fragment,e),kt=g(e),ne=p(e,"P",{});var ke=c(ne);Qt=n(ke,"Use o "),rt=p(ke,"CODE",{});var ya=c(rt);Xt=n(ya,"DataCollatorWithPadding"),ya.forEach(a),Zt=n(ke," para criar um batch de exemplos. Ele tamb\xE9m "),nt=p(ke,"EM",{});var Pa=c(nt);ea=n(Pa,"preencher\xE1 dinamicamente"),Pa.forEach(a),ta=n(ke," seu texto at\xE9 o comprimento do elemento mais longo em seu batch, para que os exemplos do batch tenham um comprimento uniforme. 
Embora seja poss\xEDvel preencher seu texto com a fun\xE7\xE3o "),lt=p(ke,"CODE",{});var Aa=c(lt);aa=n(Aa,"tokenizer"),Aa.forEach(a),sa=n(ke," definindo "),it=p(ke,"CODE",{});var Sa=c(it);oa=n(Sa,"padding=True"),Sa.forEach(a),ra=n(ke,", o preenchimento din\xE2mico utilizando um data collator \xE9 mais eficiente."),ke.forEach(a),Et=g(e),C(ye.$$.fragment,e),wt=g(e),xe=p(e,"H2",{class:!0});var At=c(xe);Pe=p(At,"A",{id:!0,class:!0,href:!0});var Oa=c(Pe);pt=p(Oa,"SPAN",{});var Fa=c(pt);C(Re.$$.fragment,Fa),Fa.forEach(a),Oa.forEach(a),na=g(At),ct=p(At,"SPAN",{});var Ma=c(ct);la=n(Ma,"Train"),Ma.forEach(a),At.forEach(a),xt=g(e),C(Ae.$$.fragment,e),zt=g(e),C(Se.$$.fragment,e),this.h()},h(){w(s,"name","hf:doc:metadata"),w(s,"content",JSON.stringify(ns)),w(u,"id","classificao-de-texto"),w(u,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(u,"href","#classificao-de-texto"),w(o,"class","relative group"),w(U,"href","https://huggingface.co/distilbert-base-uncased"),w(U,"rel","nofollow"),w(F,"href","https://huggingface.co/datasets/imdb"),w(F,"rel","nofollow"),w(K,"id","carregue-o-conjunto-de-dados-imdb"),w(K,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(K,"href","#carregue-o-conjunto-de-dados-imdb"),w(N,"class","relative group"),w(Ce,"id","prprocessamento-dos-dados"),w(Ce,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(Ce,"href","#prprocessamento-dos-dados"),w(we,"class","relative group"),w(Ne,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map"),w(Ne,"rel","nofollow"),w(Pe,"id","train"),w(Pe,"class","header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),w(Pe,"href","#train"),w(xe,"class","relative group")},m(e,h){t(document.head,s),m(e,f,h),m(e,o,h),t(o,u),t(u,b),T(_,b,null),t(o,v),t(o,z),t(z,x),m(e,j,h),T(A,e,h),m(e,I,h),m(e,Y,h),t(Y,M),m(e,H,h),m(e,O,h),t(O,se),t(O,U),t(U,be),t(O,fe),t(O,F),t(F,ue),t(O,V),m(e,he,h),T(G,e,h),m(e,_e,h),m(e,N,h),t(N,K),t(K,R),T(Q,R,null),t(N,X),t(N,oe),t(oe,L),m(e,ee,h),m(e,W,h),t(W,$e),m(e,d,h),T(k,e,h),m(e,te,h),m(e,B,h),t(B,re),m(e,J,h),T(Z,e,h),m(e,ie,h),m(e,pe,h),t(pe,ce),m(e,ae,h),m(e,me,h),t(me,l),t(l,E),t(E,Ee),t(l,Ue),t(me,Ve),t(me,de),t(de,ze),t(ze,qe),t(de,Ge),t(de,ve),t(ve,He),t(de,Ke),t(de,je),t(je,Je),t(de,Ye),m(e,ht,h),m(e,we,h),t(we,Ce),t(Ce,Xe),T(Fe,Xe,null),t(we,Ft),t(we,Ze),t(Ze,Mt),m(e,_t,h),m(e,Te,h),t(Te,It),t(Te,et),t(et,Nt),t(Te,Bt),m(e,$t,h),T(Me,e,h),m(e,gt,h),m(e,De,h),t(De,Rt),t(De,tt),t(tt,Lt),t(De,Wt),m(e,bt,h),T(Ie,e,h),m(e,vt,h),m(e,ge,h),t(ge,Ut),t(ge,Ne),t(Ne,at),t(at,Vt),t(ge,Gt),t(ge,st),t(st,Ht),t(ge,Kt),t(ge,ot),t(ot,Jt),t(ge,Yt),m(e,jt,h),T(Be,e,h),m(e,kt,h),m(e,ne,h),t(ne,Qt),t(ne,rt),t(rt,Xt),t(ne,Zt),t(ne,nt),t(nt,ea),t(ne,ta),t(ne,lt),t(lt,aa),t(ne,sa),t(ne,it),t(it,oa),t(ne,ra),m(e,Et,h),T(ye,e,h),m(e,wt,h),m(e,xe,h),t(xe,Pe),t(Pe,pt),T(Re,pt,null),t(xe,na),t(xe,ct),t(ct,la),m(e,xt,h),T(Ae,e,h),m(e,zt,h),T(Se,e,h),qt=!0},p(e,[h]){const Le={};h&2&&(Le.$$scope={dirty:h,ctx:e}),G.$set(Le);const mt={};h&2&&(mt.$$scope={dirty:h,ctx:e}),ye.$set(mt);const dt={};h&2&&(dt.$$scope={dirty:h,ctx:e}),Ae.$set(dt);const 
ft={};h&2&&(ft.$$scope={dirty:h,ctx:e}),Se.$set(ft)},i(e){qt||(D(_.$$.fragment,e),D(A.$$.fragment,e),D(G.$$.fragment,e),D(Q.$$.fragment,e),D(k.$$.fragment,e),D(Z.$$.fragment,e),D(Fe.$$.fragment,e),D(Me.$$.fragment,e),D(Ie.$$.fragment,e),D(Be.$$.fragment,e),D(ye.$$.fragment,e),D(Re.$$.fragment,e),D(Ae.$$.fragment,e),D(Se.$$.fragment,e),qt=!0)},o(e){y(_.$$.fragment,e),y(A.$$.fragment,e),y(G.$$.fragment,e),y(Q.$$.fragment,e),y(k.$$.fragment,e),y(Z.$$.fragment,e),y(Fe.$$.fragment,e),y(Me.$$.fragment,e),y(Ie.$$.fragment,e),y(Be.$$.fragment,e),y(ye.$$.fragment,e),y(Re.$$.fragment,e),y(Ae.$$.fragment,e),y(Se.$$.fragment,e),qt=!1},d(e){a(s),e&&a(f),e&&a(o),P(_),e&&a(j),P(A,e),e&&a(I),e&&a(Y),e&&a(H),e&&a(O),e&&a(he),P(G,e),e&&a(_e),e&&a(N),P(Q),e&&a(ee),e&&a(W),e&&a(d),P(k,e),e&&a(te),e&&a(B),e&&a(J),P(Z,e),e&&a(ie),e&&a(pe),e&&a(ae),e&&a(me),e&&a(ht),e&&a(we),P(Fe),e&&a(_t),e&&a(Te),e&&a($t),P(Me,e),e&&a(gt),e&&a(De),e&&a(bt),P(Ie,e),e&&a(vt),e&&a(ge),e&&a(jt),P(Be,e),e&&a(kt),e&&a(ne),e&&a(Et),P(ye,e),e&&a(wt),e&&a(xe),P(Re),e&&a(xt),P(Ae,e),e&&a(zt),P(Se,e)}}}const ns={local:"classificao-de-texto",sections:[{local:"carregue-o-conjunto-de-dados-imdb",title:"Carregue o conjunto de dados IMDb"},{local:"prprocessamento-dos-dados",title:"Pr\xE9-processamento dos dados"},{local:"train",title:"Train"}],title:"Classifica\xE7\xE3o de texto"};function ls(S){return Ua(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class us extends Ba{constructor(s){super();Ra(this,s,ls,rs,La,{})}}export{us as default,ns as metadata};
488
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/_app/pages/tasks/token_classification.mdx-hf-doc-builder.js
import{S as ho,i as _o,s as go,e as r,k as _,w as x,t as n,M as $o,c as i,d as e,m as g,a as p,x as E,h as l,b as k,G as a,g as d,y as q,q as z,o as C,B as T,v as jo,L as fo}from"../../chunks/vendor-hf-doc-builder.js";import{T as he}from"../../chunks/Tip-hf-doc-builder.js";import{Y as mo}from"../../chunks/Youtube-hf-doc-builder.js";import{I as fe}from"../../chunks/IconCopyLink-hf-doc-builder.js";import{C as as}from"../../chunks/CodeBlock-hf-doc-builder.js";import{F as uo,M as _e}from"../../chunks/Markdown-hf-doc-builder.js";function ko(P){let t,u,o,f,j;return{c(){t=r("p"),u=n("Consulte a "),o=r("a"),f=n("p\xE1gina de tarefas de classifica\xE7\xE3o de tokens"),j=n(" para obter mais informa\xE7\xF5es sobre outras formas de classifica\xE7\xE3o de tokens e seus modelos, conjuntos de dados e m\xE9tricas associadas."),this.h()},l($){t=i($,"P",{});var v=p(t);u=l(v,"Consulte a "),o=i(v,"A",{href:!0,rel:!0});var D=p(o);f=l(D,"p\xE1gina de tarefas de classifica\xE7\xE3o de tokens"),D.forEach(e),j=l(v," para obter mais informa\xE7\xF5es sobre outras formas de classifica\xE7\xE3o de tokens e seus modelos, conjuntos de dados e m\xE9tricas associadas."),v.forEach(e),this.h()},h(){k(o,"href","https://huggingface.co/tasks/token-classification"),k(o,"rel","nofollow")},m($,v){d($,t,v),a(t,u),a(t,o),a(o,f),a(t,j)},d($){$&&e(t)}}}function vo(P){let t,u;return t=new as({props:{code:`from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)`}}),{c(){x(t.$$.fragment)},l(o){E(t.$$.fragment,o)},m(o,f){q(t,o,f),u=!0},p:fo,i(o){u||(z(t.$$.fragment,o),u=!0)},o(o){C(t.$$.fragment,o),u=!1},d(o){T(t,o)}}}function bo(P){let 
t,u;return t=new _e({props:{$$slots:{default:[vo]},$$scope:{ctx:P}}}),{c(){x(t.$$.fragment)},l(o){E(t.$$.fragment,o)},m(o,f){q(t,o,f),u=!0},p(o,f){const j={};f&2&&(j.$$scope={dirty:f,ctx:o}),t.$set(j)},i(o){u||(z(t.$$.fragment,o),u=!0)},o(o){C(t.$$.fragment,o),u=!1},d(o){T(t,o)}}}function wo(P){let t,u;return t=new as({props:{code:`from transformers import DataCollatorForTokenClassification data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)`}}),{c(){x(t.$$.fragment)},l(o){E(t.$$.fragment,o)},m(o,f){q(t,o,f),u=!0},p:fo,i(o){u||(z(t.$$.fragment,o),u=!0)},o(o){C(t.$$.fragment,o),u=!1},d(o){T(t,o)}}}function xo(P){let t,u;return t=new _e({props:{$$slots:{default:[wo]},$$scope:{ctx:P}}}),{c(){x(t.$$.fragment)},l(o){E(t.$$.fragment,o)},m(o,f){q(t,o,f),u=!0},p(o,f){const j={};f&2&&(j.$$scope={dirty:f,ctx:o}),t.$set(j)},i(o){u||(z(t.$$.fragment,o),u=!0)},o(o){C(t.$$.fragment,o),u=!1},d(o){T(t,o)}}}function Eo(P){let t,u,o,f,j,$,v,D;return{c(){t=r("p"),u=n("Se voc\xEA n\xE3o estiver familiarizado com o fine-tuning de um modelo com o "),o=r("code"),f=n("Trainer"),j=n(", d\xEA uma olhada no tutorial b\xE1sico "),$=r("a"),v=n("aqui"),D=n("!"),this.h()},l(A){t=i(A,"P",{});var w=p(t);u=l(w,"Se voc\xEA n\xE3o estiver familiarizado com o fine-tuning de um modelo com o "),o=i(w,"CODE",{});var O=p(o);f=l(O,"Trainer"),O.forEach(e),j=l(w,", d\xEA uma olhada no tutorial b\xE1sico "),$=i(w,"A",{href:!0});var 
L=p($);v=l(L,"aqui"),L.forEach(e),D=l(w,"!"),w.forEach(e),this.h()},h(){k($,"href","../training#finetune-with-trainer")},m(A,w){d(A,t,w),a(t,u),a(t,o),a(o,f),a(t,j),a(t,$),a($,v),a(t,D)},d(A){A&&e(t)}}}function qo(P){let t,u,o,f,j,$,v,D,A,w,O,L,G,N,Y,F,ns,U,$s,us,S,fs,W,hs,V,_s,B,K,I,J,Q,ls,R,es;return v=new as({props:{code:`from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer model = AutoModelForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=14)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForTokenClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">14</span>)`}}),A=new he({props:{$$slots:{default:[Eo]},$$scope:{ctx:P}}}),R=new as({props:{code:`training_args = TrainingArguments( output_dir="./results", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=3, weight_decay=0.01, ) trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_wnut["train"], eval_dataset=tokenized_wnut["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train()`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... 
</span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_wnut[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()`}}),{c(){t=r("p"),u=n("Carregue o DistilBERT com o "),o=r("code"),f=n("AutoModelForTokenClassification"),j=n(" junto com o n\xFAmero de r\xF3tulos esperados:"),$=_(),x(v.$$.fragment),D=_(),x(A.$$.fragment),w=_(),O=r("p"),L=n("Nesse ponto, restam apenas tr\xEAs passos:"),G=_(),N=r("ol"),Y=r("li"),F=n("Definir seus hiperpar\xE2metros de treinamento em "),ns=r("code"),U=n("TrainingArguments"),$s=n("."),us=_(),S=r("li"),fs=n("Passar os argumentos de treinamento para o "),W=r("code"),hs=n("Trainer"),V=n(" junto com o modelo, conjunto de dados, tokenizador e o data collator."),_s=_(),B=r("li"),K=n("Chamar a fun\xE7\xE3o "),I=r("code"),J=n("train()"),Q=n(" para executar o fine-tuning do seu modelo."),ls=_(),x(R.$$.fragment)},l(h){t=i(h,"P",{});var y=p(t);u=l(y,"Carregue o DistilBERT com o "),o=i(y,"CODE",{});var ts=p(o);f=l(ts,"AutoModelForTokenClassification"),ts.forEach(e),j=l(y," junto com o n\xFAmero de r\xF3tulos esperados:"),y.forEach(e),$=g(h),E(v.$$.fragment,h),D=g(h),E(A.$$.fragment,h),w=g(h),O=i(h,"P",{});var H=p(O);L=l(H,"Nesse ponto, restam apenas tr\xEAs 
passos:"),H.forEach(e),G=g(h),N=i(h,"OL",{});var X=p(N);Y=i(X,"LI",{});var M=p(Y);F=l(M,"Definir seus hiperpar\xE2metros de treinamento em "),ns=i(M,"CODE",{});var ks=p(ns);U=l(ks,"TrainingArguments"),ks.forEach(e),$s=l(M,"."),M.forEach(e),us=g(X),S=i(X,"LI",{});var Z=p(S);fs=l(Z,"Passar os argumentos de treinamento para o "),W=i(Z,"CODE",{});var ss=p(W);hs=l(ss,"Trainer"),ss.forEach(e),V=l(Z," junto com o modelo, conjunto de dados, tokenizador e o data collator."),Z.forEach(e),_s=g(X),B=i(X,"LI",{});var is=p(B);K=l(is,"Chamar a fun\xE7\xE3o "),I=i(is,"CODE",{});var os=p(I);J=l(os,"train()"),os.forEach(e),Q=l(is," para executar o fine-tuning do seu modelo."),is.forEach(e),X.forEach(e),ls=g(h),E(R.$$.fragment,h)},m(h,y){d(h,t,y),a(t,u),a(t,o),a(o,f),a(t,j),d(h,$,y),q(v,h,y),d(h,D,y),q(A,h,y),d(h,w,y),d(h,O,y),a(O,L),d(h,G,y),d(h,N,y),a(N,Y),a(Y,F),a(Y,ns),a(ns,U),a(Y,$s),a(N,us),a(N,S),a(S,fs),a(S,W),a(W,hs),a(S,V),a(N,_s),a(N,B),a(B,K),a(B,I),a(I,J),a(B,Q),d(h,ls,y),q(R,h,y),es=!0},p(h,y){const ts={};y&2&&(ts.$$scope={dirty:y,ctx:h}),A.$set(ts)},i(h){es||(z(v.$$.fragment,h),z(A.$$.fragment,h),z(R.$$.fragment,h),es=!0)},o(h){C(v.$$.fragment,h),C(A.$$.fragment,h),C(R.$$.fragment,h),es=!1},d(h){h&&e(t),h&&e($),T(v,h),h&&e(D),T(A,h),h&&e(w),h&&e(O),h&&e(G),h&&e(N),h&&e(ls),T(R,h)}}}function zo(P){let t,u;return t=new _e({props:{$$slots:{default:[qo]},$$scope:{ctx:P}}}),{c(){x(t.$$.fragment)},l(o){E(t.$$.fragment,o)},m(o,f){q(t,o,f),u=!0},p(o,f){const j={};f&2&&(j.$$scope={dirty:f,ctx:o}),t.$set(j)},i(o){u||(z(t.$$.fragment,o),u=!0)},o(o){C(t.$$.fragment,o),u=!1},d(o){T(t,o)}}}function Co(P){let t,u,o,f,j;return{c(){t=r("p"),u=n("Se voc\xEA n\xE3o estiver familiarizado com o fine-tuning de um modelo com o Keras, d\xEA uma olhada no tutorial b\xE1sico "),o=r("a"),f=n("aqui"),j=n("!"),this.h()},l($){t=i($,"P",{});var v=p(t);u=l(v,"Se voc\xEA n\xE3o estiver familiarizado com o fine-tuning de um modelo com o Keras, d\xEA uma olhada no tutorial b\xE1sico 
"),o=i(v,"A",{href:!0});var D=p(o);f=l(D,"aqui"),D.forEach(e),j=l(v,"!"),v.forEach(e),this.h()},h(){k(o,"href","training#finetune-with-keras")},m($,v){d($,t,v),a(t,u),a(t,o),a(o,f),a(t,j)},d($){$&&e(t)}}}function To(P){let t,u,o,f,j,$,v,D,A,w,O,L,G,N,Y,F,ns,U,$s,us,S,fs,W,hs,V,_s,B,K,I,J,Q,ls,R,es,h,y,ts,H,X,M,ks,Z,ss,is,os,Ds,ps,As;return N=new as({props:{code:`tf_train_set = tokenized_wnut["train"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=True, batch_size=16, collate_fn=data_collator, ) tf_validation_set = tokenized_wnut["validation"].to_tf_dataset( columns=["attention_mask", "input_ids", "labels"], shuffle=False, batch_size=16, collate_fn=data_collator, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = tokenized_wnut[<span class="hljs-string">&quot;validation&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>)`}}),F=new he({props:{$$slots:{default:[Co]},$$scope:{ctx:P}}}),S=new as({props:{code:`from transformers import create_optimizer batch_size = 16 num_train_epochs = 3 num_train_steps = (len(tokenized_wnut["train"]) // batch_size) * num_train_epochs optimizer, lr_schedule = create_optimizer( init_lr=2e-5, num_train_steps=num_train_steps, weight_decay_rate=0.01, num_warmup_steps=0, )`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_epochs = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_steps = (<span class="hljs-built_in">len</span>(tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_train_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, lr_schedule = create_optimizer( <span class="hljs-meta">... </span> init_lr=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_train_steps=num_train_steps, <span class="hljs-meta">... </span> weight_decay_rate=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> num_warmup_steps=<span class="hljs-number">0</span>, <span class="hljs-meta">... 
</span>)`}}),I=new as({props:{code:`from transformers import TFAutoModelForTokenClassification model = TFAutoModelForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=2)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)`}}),H=new as({props:{code:`import tensorflow as tf model.compile(optimizer=optimizer)`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)`}}),ps=new as({props:{code:"model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)'}}),{c(){t=r("p"),u=n("Para executar o fine-tuning de um modelo no TensorFlow, comece convertendo seu conjunto de dados para o formato "),o=r("code"),f=n("tf.data.Dataset"),j=n(" com "),$=r("a"),v=r("code"),D=n("to_tf_dataset"),A=n(". 
Nessa execu\xE7\xE3o voc\xEA dever\xE1 especificar as entradas e r\xF3tulos (no par\xE2metro "),w=r("code"),O=n("columns"),L=n("), se deseja embaralhar o conjunto de dados, o tamanho do batch e o data collator:"),G=_(),x(N.$$.fragment),Y=_(),x(F.$$.fragment),ns=_(),U=r("p"),$s=n("Configure o otimizador e alguns hiperpar\xE2metros de treinamento:"),us=_(),x(S.$$.fragment),fs=_(),W=r("p"),hs=n("Carregue o DistilBERT com o "),V=r("code"),_s=n("TFAutoModelForTokenClassification"),B=n(" junto com o n\xFAmero de r\xF3tulos esperados:"),K=_(),x(I.$$.fragment),J=_(),Q=r("p"),ls=n("Configure o modelo para treinamento com o m\xE9todo "),R=r("a"),es=r("code"),h=n("compile"),y=n(":"),ts=_(),x(H.$$.fragment),X=_(),M=r("p"),ks=n("Chame o m\xE9todo "),Z=r("a"),ss=r("code"),is=n("fit"),os=n(" para executar o fine-tuning do modelo:"),Ds=_(),x(ps.$$.fragment),this.h()},l(c){t=i(c,"P",{});var b=p(t);u=l(b,"Para executar o fine-tuning de um modelo no TensorFlow, comece convertendo seu conjunto de dados para o formato "),o=i(b,"CODE",{});var cs=p(o);f=l(cs,"tf.data.Dataset"),cs.forEach(e),j=l(b," com "),$=i(b,"A",{href:!0,rel:!0});var Us=p($);v=i(Us,"CODE",{});var ds=p(v);D=l(ds,"to_tf_dataset"),ds.forEach(e),Us.forEach(e),A=l(b,". 
Nessa execu\xE7\xE3o voc\xEA dever\xE1 especificar as entradas e r\xF3tulos (no par\xE2metro "),w=i(b,"CODE",{});var ta=p(w);O=l(ta,"columns"),ta.forEach(e),L=l(b,"), se deseja embaralhar o conjunto de dados, o tamanho do batch e o data collator:"),b.forEach(e),G=g(c),E(N.$$.fragment,c),Y=g(c),E(F.$$.fragment,c),ns=g(c),U=i(c,"P",{});var ys=p(U);$s=l(ys,"Configure o otimizador e alguns hiperpar\xE2metros de treinamento:"),ys.forEach(e),us=g(c),E(S.$$.fragment,c),fs=g(c),W=i(c,"P",{});var Ps=p(W);hs=l(Ps,"Carregue o DistilBERT com o "),V=i(Ps,"CODE",{});var oa=p(V);_s=l(oa,"TFAutoModelForTokenClassification"),oa.forEach(e),B=l(Ps," junto com o n\xFAmero de r\xF3tulos esperados:"),Ps.forEach(e),K=g(c),E(I.$$.fragment,c),J=g(c),Q=i(c,"P",{});var vs=p(Q);ls=l(vs,"Configure o modelo para treinamento com o m\xE9todo "),R=i(vs,"A",{href:!0,rel:!0});var na=p(R);es=i(na,"CODE",{});var la=p(es);h=l(la,"compile"),la.forEach(e),na.forEach(e),y=l(vs,":"),vs.forEach(e),ts=g(c),E(H.$$.fragment,c),X=g(c),M=i(c,"P",{});var qs=p(M);ks=l(qs,"Chame o m\xE9todo "),Z=i(qs,"A",{href:!0,rel:!0});var ms=p(Z);ss=i(ms,"CODE",{});var zs=p(ss);is=l(zs,"fit"),zs.forEach(e),ms.forEach(e),os=l(qs," para executar o fine-tuning do 
modelo:"),qs.forEach(e),Ds=g(c),E(ps.$$.fragment,c),this.h()},h(){k($,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset"),k($,"rel","nofollow"),k(R,"href","https://keras.io/api/models/model_training_apis/#compile-method"),k(R,"rel","nofollow"),k(Z,"href","https://keras.io/api/models/model_training_apis/#fit-method"),k(Z,"rel","nofollow")},m(c,b){d(c,t,b),a(t,u),a(t,o),a(o,f),a(t,j),a(t,$),a($,v),a(v,D),a(t,A),a(t,w),a(w,O),a(t,L),d(c,G,b),q(N,c,b),d(c,Y,b),q(F,c,b),d(c,ns,b),d(c,U,b),a(U,$s),d(c,us,b),q(S,c,b),d(c,fs,b),d(c,W,b),a(W,hs),a(W,V),a(V,_s),a(W,B),d(c,K,b),q(I,c,b),d(c,J,b),d(c,Q,b),a(Q,ls),a(Q,R),a(R,es),a(es,h),a(Q,y),d(c,ts,b),q(H,c,b),d(c,X,b),d(c,M,b),a(M,ks),a(M,Z),a(Z,ss),a(ss,is),a(M,os),d(c,Ds,b),q(ps,c,b),As=!0},p(c,b){const cs={};b&2&&(cs.$$scope={dirty:b,ctx:c}),F.$set(cs)},i(c){As||(z(N.$$.fragment,c),z(F.$$.fragment,c),z(S.$$.fragment,c),z(I.$$.fragment,c),z(H.$$.fragment,c),z(ps.$$.fragment,c),As=!0)},o(c){C(N.$$.fragment,c),C(F.$$.fragment,c),C(S.$$.fragment,c),C(I.$$.fragment,c),C(H.$$.fragment,c),C(ps.$$.fragment,c),As=!1},d(c){c&&e(t),c&&e(G),T(N,c),c&&e(Y),T(F,c),c&&e(ns),c&&e(U),c&&e(us),T(S,c),c&&e(fs),c&&e(W),c&&e(K),T(I,c),c&&e(J),c&&e(Q),c&&e(ts),T(H,c),c&&e(X),c&&e(M),c&&e(Ds),T(ps,c)}}}function Do(P){let t,u;return t=new _e({props:{$$slots:{default:[To]},$$scope:{ctx:P}}}),{c(){x(t.$$.fragment)},l(o){E(t.$$.fragment,o)},m(o,f){q(t,o,f),u=!0},p(o,f){const j={};f&2&&(j.$$scope={dirty:f,ctx:o}),t.$set(j)},i(o){u||(z(t.$$.fragment,o),u=!0)},o(o){C(t.$$.fragment,o),u=!1},d(o){T(t,o)}}}function Ao(P){let t,u,o,f,j,$,v,D;return{c(){t=r("p"),u=n("Para obter um exemplo mais aprofundado de como executar o fine-tuning de um modelo para classifica\xE7\xE3o de tokens, d\xEA uma olhada nesse "),o=r("a"),f=n("notebook utilizando PyTorch"),j=n(" ou nesse "),$=r("a"),v=n("notebook utilizando TensorFlow"),D=n("."),this.h()},l(A){t=i(A,"P",{});var w=p(t);u=l(w,"Para obter um exemplo 
mais aprofundado de como executar o fine-tuning de um modelo para classifica\xE7\xE3o de tokens, d\xEA uma olhada nesse "),o=i(w,"A",{href:!0,rel:!0});var O=p(o);f=l(O,"notebook utilizando PyTorch"),O.forEach(e),j=l(w," ou nesse "),$=i(w,"A",{href:!0,rel:!0});var L=p($);v=l(L,"notebook utilizando TensorFlow"),L.forEach(e),D=l(w,"."),w.forEach(e),this.h()},h(){k(o,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb"),k(o,"rel","nofollow"),k($,"href","https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb"),k($,"rel","nofollow")},m(A,w){d(A,t,w),a(t,u),a(t,o),a(o,f),a(t,j),a(t,$),a($,v),a(t,D)},d(A){A&&e(t)}}}function yo(P){let t,u,o,f,j,$,v,D,A,w,O,L,G,N,Y,F,ns,U,$s,us,S,fs,W,hs,V,_s,B,K,I,J,Q,ls,R,es,h,y,ts,H,X,M,ks,Z,ss,is,os,Ds,ps,As,c,b,cs,Us,ds,ta,ys,Ps,oa,vs,na,la,qs,ms,zs,fa,ge,$e,je,bs,ha,ke,ve,_a,be,we,ga,xe,Ee,qe,ra,$a,ze,Ce,La,Cs,Os,ja,Ws,Te,ka,De,Ua,Vs,Wa,Fs,Ae,va,ye,Pe,Va,Hs,Ha,Ss,Oe,ba,Fe,Se,Ya,Ys,Ka,ws,Ne,wa,Be,Me,xa,Ie,Re,Za,xs,Ks,Le,Zs,Ea,Ue,We,Ve,js,He,qa,Ye,Ke,za,Ze,Ge,Ca,Je,Qe,Xe,Gs,st,Ta,at,et,Ga,ia,tt,Ja,Js,Qa,gs,ot,Qs,Da,nt,lt,Aa,rt,it,ya,pt,ct,Xa,Xs,se,rs,dt,Pa,mt,ut,Oa,ft,ht,Fa,_t,gt,Sa,$t,jt,ae,Ns,ee,Ts,Bs,Na,sa,kt,Ba,vt,te,Ms,oe,Is,ne;return $=new fe({}),O=new mo({props:{id:"wVHdVlPScxA"}}),V=new he({props:{$$slots:{default:[ko]},$$scope:{ctx:P}}}),J=new fe({}),H=new as({props:{code:`from datasets import load_dataset wnut = load_dataset("wnut_17")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>wnut = load_dataset(<span class="hljs-string">&quot;wnut_17&quot;</span>)`}}),ss=new as({props:{code:'wnut["train"][0]',highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>wnut[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span 
class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;0&#x27;</span>, <span class="hljs-string">&#x27;ner_tags&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">8</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;tokens&#x27;</span>: [<span class="hljs-string">&#x27;@paulwalk&#x27;</span>, <span class="hljs-string">&#x27;It&#x27;</span>, <span class="hljs-string">&quot;&#x27;s&quot;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;I&#x27;</span>, <span class="hljs-string">&quot;&#x27;m&quot;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Empire&#x27;</span>, <span class="hljs-string">&#x27;State&#x27;</span>, 
<span class="hljs-string">&#x27;Building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;ESB&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>] }`}}),cs=new as({props:{code:`label_list = wnut["train"].features[f"ner_tags"].feature.names label_list`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>label_list = wnut[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">f&quot;ner_tags&quot;</span>].feature.names <span class="hljs-meta">&gt;&gt;&gt; </span>label_list [ <span class="hljs-string">&quot;O&quot;</span>, <span class="hljs-string">&quot;B-corporation&quot;</span>, <span class="hljs-string">&quot;I-corporation&quot;</span>, <span class="hljs-string">&quot;B-creative-work&quot;</span>, <span class="hljs-string">&quot;I-creative-work&quot;</span>, <span class="hljs-string">&quot;B-group&quot;</span>, <span class="hljs-string">&quot;I-group&quot;</span>, <span class="hljs-string">&quot;B-location&quot;</span>, <span class="hljs-string">&quot;I-location&quot;</span>, <span class="hljs-string">&quot;B-person&quot;</span>, <span class="hljs-string">&quot;I-person&quot;</span>, <span class="hljs-string">&quot;B-product&quot;</span>, <span class="hljs-string">&quot;I-product&quot;</span>, ]`}}),Ws=new fe({}),Vs=new mo({props:{id:"iY2AZYdZAr0"}}),Hs=new as({props:{code:`from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span 
class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)`}}),Ys=new as({props:{code:`tokenized_input = tokenizer(example["tokens"], is_split_into_words=True) tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"]) tokens`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_input = tokenizer(example[<span class="hljs-string">&quot;tokens&quot;</span>], is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens = tokenizer.convert_ids_to_tokens(tokenized_input[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens [<span class="hljs-string">&#x27;[CLS]&#x27;</span>, <span class="hljs-string">&#x27;@&#x27;</span>, <span class="hljs-string">&#x27;paul&#x27;</span>, <span class="hljs-string">&#x27;##walk&#x27;</span>, <span class="hljs-string">&#x27;it&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;s&#x27;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;i&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;m&#x27;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;empire&#x27;</span>, <span class="hljs-string">&#x27;state&#x27;</span>, <span class="hljs-string">&#x27;building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span 
class="hljs-string">&#x27;es&#x27;</span>, <span class="hljs-string">&#x27;##b&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;[SEP]&#x27;</span>]`}}),Js=new as({props:{code:`def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True) labels = [] for i, label in enumerate(examples[f"ner_tags"]): word_ids = tokenized_inputs.word_ids(batch_index=i) # Map tokens to their respective word. previous_word_idx = None label_ids = [] for word_idx in word_ids: # Set the special tokens to -100. if word_idx is None: label_ids.append(-100) elif word_idx != previous_word_idx: # Only label the first token of a given word. label_ids.append(label[word_idx]) else: label_ids.append(-100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs`,highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">tokenize_and_align_labels</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> tokenized_inputs = tokenizer(examples[<span class="hljs-string">&quot;tokens&quot;</span>], truncation=<span class="hljs-literal">True</span>, is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> labels = [] <span class="hljs-meta">... 
</span> <span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(examples[<span class="hljs-string">f&quot;ner_tags&quot;</span>]): <span class="hljs-meta">... </span> word_ids = tokenized_inputs.word_ids(batch_index=i) <span class="hljs-comment"># Map tokens to their respective word.</span> <span class="hljs-meta">... </span> previous_word_idx = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> label_ids = [] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> word_idx <span class="hljs-keyword">in</span> word_ids: <span class="hljs-comment"># Set the special tokens to -100.</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> word_idx <span class="hljs-keyword">is</span> <span class="hljs-literal">None</span>: <span class="hljs-meta">... </span> label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">elif</span> word_idx != previous_word_idx: <span class="hljs-comment"># Only label the first token of a given word.</span> <span class="hljs-meta">... </span> label_ids.append(label[word_idx]) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> previous_word_idx = word_idx <span class="hljs-meta">... </span> labels.append(label_ids) <span class="hljs-meta">... </span> tokenized_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels <span class="hljs-meta">... 
</span> <span class="hljs-keyword">return</span> tokenized_inputs`}}),Xs=new as({props:{code:"tokenized_wnut = wnut.map(tokenize_and_align_labels, batched=True)",highlighted:'<span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_wnut = wnut.<span class="hljs-built_in">map</span>(tokenize_and_align_labels, batched=<span class="hljs-literal">True</span>)'}}),Ns=new uo({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[xo],pytorch:[bo]},$$scope:{ctx:P}}}),sa=new fe({}),Ms=new uo({props:{pytorch:!0,tensorflow:!0,jax:!1,$$slots:{tensorflow:[Do],pytorch:[zo]},$$scope:{ctx:P}}}),Is=new he({props:{$$slots:{default:[Ao]},$$scope:{ctx:P}}}),{c(){t=r("meta"),u=_(),o=r("h1"),f=r("a"),j=r("span"),x($.$$.fragment),v=_(),D=r("span"),A=n("Classifica\xE7\xE3o de tokens"),w=_(),x(O.$$.fragment),L=_(),G=r("p"),N=n("A classifica\xE7\xE3o de tokens atribui um r\xF3tulo a tokens individuais em uma frase. Uma das tarefas de classifica\xE7\xE3o de tokens mais comuns \xE9 o Reconhecimento de Entidade Nomeada, tamb\xE9m chamada de NER (sigla em ingl\xEAs para Named Entity Recognition). O NER tenta encontrar um r\xF3tulo para cada entidade em uma frase, como uma pessoa, local ou organiza\xE7\xE3o."),Y=_(),F=r("p"),ns=n("Este guia mostrar\xE1 como realizar o fine-tuning do "),U=r("a"),$s=n("DistilBERT"),us=n(" no conjunto de dados "),S=r("a"),fs=n("WNUT 17"),W=n(" para detectar novas entidades."),hs=_(),x(V.$$.fragment),_s=_(),B=r("h2"),K=r("a"),I=r("span"),x(J.$$.fragment),Q=_(),ls=r("span"),R=n("Carregando o conjunto de dados WNUT 17"),es=_(),h=r("p"),y=n("Carregue o conjunto de dados WNUT 17 da biblioteca \u{1F917} Datasets:"),ts=_(),x(H.$$.fragment),X=_(),M=r("p"),ks=n("E d\xEA uma olhada em um exemplo:"),Z=_(),x(ss.$$.fragment),is=_(),os=r("p"),Ds=n("Cada n\xFAmero em "),ps=r("code"),As=n("ner_tags"),c=n(" representa uma entidade. 
Converta o n\xFAmero em um r\xF3tulo para obter mais informa\xE7\xF5es:"),b=_(),x(cs.$$.fragment),Us=_(),ds=r("p"),ta=n("O "),ys=r("code"),Ps=n("ner_tag"),oa=n(" descreve uma entidade, como uma organiza\xE7\xE3o, local ou pessoa. A letra que prefixa cada "),vs=r("code"),na=n("ner_tag"),la=n(" indica a posi\xE7\xE3o do token da entidade:"),qs=_(),ms=r("ul"),zs=r("li"),fa=r("code"),ge=n("B-"),$e=n(" indica o in\xEDcio de uma entidade."),je=_(),bs=r("li"),ha=r("code"),ke=n("I-"),ve=n(" indica que um token est\xE1 contido dentro da mesma entidade (por exemplo, o token "),_a=r("code"),be=n("State"),we=n(" pode fazer parte de uma entidade como "),ga=r("code"),xe=n("Empire State Building"),Ee=n(")."),qe=_(),ra=r("li"),$a=r("code"),ze=n("0"),Ce=n(" indica que o token n\xE3o corresponde a nenhuma entidade."),La=_(),Cs=r("h2"),Os=r("a"),ja=r("span"),x(Ws.$$.fragment),Te=_(),ka=r("span"),De=n("Pr\xE9-processamento"),Ua=_(),x(Vs.$$.fragment),Wa=_(),Fs=r("p"),Ae=n("Carregue o tokenizer do DistilBERT para processar os "),va=r("code"),ye=n("tokens"),Pe=n(":"),Va=_(),x(Hs.$$.fragment),Ha=_(),Ss=r("p"),Oe=n("Como a entrada j\xE1 foi dividida em palavras, defina "),ba=r("code"),Fe=n("is_split_into_words=True"),Se=n(" para tokenizar as palavras em subpalavras:"),Ya=_(),x(Ys.$$.fragment),Ka=_(),ws=r("p"),Ne=n("Ao adicionar os tokens especiais "),wa=r("code"),Be=n("[CLS]"),Me=n(" e "),xa=r("code"),Ie=n("[SEP]"),Re=n(" e a tokeniza\xE7\xE3o de subpalavras uma incompatibilidade \xE9 gerada entre a entrada e os r\xF3tulos. Uma \xFAnica palavra correspondente a um \xFAnico r\xF3tulo pode ser dividida em duas subpalavras. 
Voc\xEA precisar\xE1 realinhar os tokens e os r\xF3tulos da seguinte forma:"),Za=_(),xs=r("ol"),Ks=r("li"),Le=n("Mapeie todos os tokens para a palavra correspondente com o m\xE9todo "),Zs=r("a"),Ea=r("code"),Ue=n("word_ids"),We=n("."),Ve=_(),js=r("li"),He=n("Atribuindo o r\xF3tulo "),qa=r("code"),Ye=n("-100"),Ke=n(" aos tokens especiais "),za=r("code"),Ze=n("[CLS]"),Ge=n(" e "),Ca=r("code"),Je=n("[SEP]"),Qe=n(" para que a fun\xE7\xE3o de loss do PyTorch ignore eles."),Xe=_(),Gs=r("li"),st=n("Rotular apenas o primeiro token de uma determinada palavra. Atribuindo "),Ta=r("code"),at=n("-100"),et=n(" a outros subtokens da mesma palavra."),Ga=_(),ia=r("p"),tt=n("Aqui est\xE1 como voc\xEA pode criar uma fun\xE7\xE3o para realinhar os tokens e r\xF3tulos e truncar sequ\xEAncias para n\xE3o serem maiores que o comprimento m\xE1ximo de entrada do DistilBERT:"),Ja=_(),x(Js.$$.fragment),Qa=_(),gs=r("p"),ot=n("Use a fun\xE7\xE3o "),Qs=r("a"),Da=r("code"),nt=n("map"),lt=n(" do \u{1F917} Datasets para tokenizar e alinhar os r\xF3tulos em todo o conjunto de dados. Voc\xEA pode acelerar a fun\xE7\xE3o "),Aa=r("code"),rt=n("map"),it=n(" configurando "),ya=r("code"),pt=n("batched=True"),ct=n(" para processar v\xE1rios elementos do conjunto de dados de uma s\xF3 vez:"),Xa=_(),x(Xs.$$.fragment),se=_(),rs=r("p"),dt=n("Use o "),Pa=r("code"),mt=n("DataCollatorForTokenClassification"),ut=n(" para criar um batch de exemplos. Ele tamb\xE9m "),Oa=r("em"),ft=n("preencher\xE1 dinamicamente"),ht=n(" seu texto e r\xF3tulos para o comprimento do elemento mais longo em seu batch, para que tenham um comprimento uniforme. 
Embora seja poss\xEDvel preencher seu texto na fun\xE7\xE3o "),Fa=r("code"),_t=n("tokenizer"),gt=n(" configurando "),Sa=r("code"),$t=n("padding=True"),jt=n(", o preenchimento din\xE2mico \xE9 mais eficiente."),ae=_(),x(Ns.$$.fragment),ee=_(),Ts=r("h2"),Bs=r("a"),Na=r("span"),x(sa.$$.fragment),kt=_(),Ba=r("span"),vt=n("Treinamento"),te=_(),x(Ms.$$.fragment),oe=_(),x(Is.$$.fragment),this.h()},l(s){const m=$o('[data-svelte="svelte-1phssyn"]',document.head);t=i(m,"META",{name:!0,content:!0}),m.forEach(e),u=g(s),o=i(s,"H1",{class:!0});var aa=p(o);f=i(aa,"A",{id:!0,class:!0,href:!0});var Ma=p(f);j=i(Ma,"SPAN",{});var Ia=p(j);E($.$$.fragment,Ia),Ia.forEach(e),Ma.forEach(e),v=g(aa),D=i(aa,"SPAN",{});var Ra=p(D);A=l(Ra,"Classifica\xE7\xE3o de tokens"),Ra.forEach(e),aa.forEach(e),w=g(s),E(O.$$.fragment,s),L=g(s),G=i(s,"P",{});var xt=p(G);N=l(xt,"A classifica\xE7\xE3o de tokens atribui um r\xF3tulo a tokens individuais em uma frase. Uma das tarefas de classifica\xE7\xE3o de tokens mais comuns \xE9 o Reconhecimento de Entidade Nomeada, tamb\xE9m chamada de NER (sigla em ingl\xEAs para Named Entity Recognition). 
O NER tenta encontrar um r\xF3tulo para cada entidade em uma frase, como uma pessoa, local ou organiza\xE7\xE3o."),xt.forEach(e),Y=g(s),F=i(s,"P",{});var pa=p(F);ns=l(pa,"Este guia mostrar\xE1 como realizar o fine-tuning do "),U=i(pa,"A",{href:!0,rel:!0});var Et=p(U);$s=l(Et,"DistilBERT"),Et.forEach(e),us=l(pa," no conjunto de dados "),S=i(pa,"A",{href:!0,rel:!0});var qt=p(S);fs=l(qt,"WNUT 17"),qt.forEach(e),W=l(pa," para detectar novas entidades."),pa.forEach(e),hs=g(s),E(V.$$.fragment,s),_s=g(s),B=i(s,"H2",{class:!0});var le=p(B);K=i(le,"A",{id:!0,class:!0,href:!0});var zt=p(K);I=i(zt,"SPAN",{});var Ct=p(I);E(J.$$.fragment,Ct),Ct.forEach(e),zt.forEach(e),Q=g(le),ls=i(le,"SPAN",{});var Tt=p(ls);R=l(Tt,"Carregando o conjunto de dados WNUT 17"),Tt.forEach(e),le.forEach(e),es=g(s),h=i(s,"P",{});var Dt=p(h);y=l(Dt,"Carregue o conjunto de dados WNUT 17 da biblioteca \u{1F917} Datasets:"),Dt.forEach(e),ts=g(s),E(H.$$.fragment,s),X=g(s),M=i(s,"P",{});var At=p(M);ks=l(At,"E d\xEA uma olhada em um exemplo:"),At.forEach(e),Z=g(s),E(ss.$$.fragment,s),is=g(s),os=i(s,"P",{});var re=p(os);Ds=l(re,"Cada n\xFAmero em "),ps=i(re,"CODE",{});var yt=p(ps);As=l(yt,"ner_tags"),yt.forEach(e),c=l(re," representa uma entidade. Converta o n\xFAmero em um r\xF3tulo para obter mais informa\xE7\xF5es:"),re.forEach(e),b=g(s),E(cs.$$.fragment,s),Us=g(s),ds=i(s,"P",{});var ca=p(ds);ta=l(ca,"O "),ys=i(ca,"CODE",{});var Pt=p(ys);Ps=l(Pt,"ner_tag"),Pt.forEach(e),oa=l(ca," descreve uma entidade, como uma organiza\xE7\xE3o, local ou pessoa. 
A letra que prefixa cada "),vs=i(ca,"CODE",{});var Ot=p(vs);na=l(Ot,"ner_tag"),Ot.forEach(e),la=l(ca," indica a posi\xE7\xE3o do token da entidade:"),ca.forEach(e),qs=g(s),ms=i(s,"UL",{});var da=p(ms);zs=i(da,"LI",{});var bt=p(zs);fa=i(bt,"CODE",{});var Ft=p(fa);ge=l(Ft,"B-"),Ft.forEach(e),$e=l(bt," indica o in\xEDcio de uma entidade."),bt.forEach(e),je=g(da),bs=i(da,"LI",{});var ea=p(bs);ha=i(ea,"CODE",{});var St=p(ha);ke=l(St,"I-"),St.forEach(e),ve=l(ea," indica que um token est\xE1 contido dentro da mesma entidade (por exemplo, o token "),_a=i(ea,"CODE",{});var Nt=p(_a);be=l(Nt,"State"),Nt.forEach(e),we=l(ea," pode fazer parte de uma entidade como "),ga=i(ea,"CODE",{});var Bt=p(ga);xe=l(Bt,"Empire State Building"),Bt.forEach(e),Ee=l(ea,")."),ea.forEach(e),qe=g(da),ra=i(da,"LI",{});var wt=p(ra);$a=i(wt,"CODE",{});var Mt=p($a);ze=l(Mt,"0"),Mt.forEach(e),Ce=l(wt," indica que o token n\xE3o corresponde a nenhuma entidade."),wt.forEach(e),da.forEach(e),La=g(s),Cs=i(s,"H2",{class:!0});var ie=p(Cs);Os=i(ie,"A",{id:!0,class:!0,href:!0});var It=p(Os);ja=i(It,"SPAN",{});var Rt=p(ja);E(Ws.$$.fragment,Rt),Rt.forEach(e),It.forEach(e),Te=g(ie),ka=i(ie,"SPAN",{});var Lt=p(ka);De=l(Lt,"Pr\xE9-processamento"),Lt.forEach(e),ie.forEach(e),Ua=g(s),E(Vs.$$.fragment,s),Wa=g(s),Fs=i(s,"P",{});var pe=p(Fs);Ae=l(pe,"Carregue o tokenizer do DistilBERT para processar os "),va=i(pe,"CODE",{});var Ut=p(va);ye=l(Ut,"tokens"),Ut.forEach(e),Pe=l(pe,":"),pe.forEach(e),Va=g(s),E(Hs.$$.fragment,s),Ha=g(s),Ss=i(s,"P",{});var ce=p(Ss);Oe=l(ce,"Como a entrada j\xE1 foi dividida em palavras, defina "),ba=i(ce,"CODE",{});var Wt=p(ba);Fe=l(Wt,"is_split_into_words=True"),Wt.forEach(e),Se=l(ce," para tokenizar as palavras em subpalavras:"),ce.forEach(e),Ya=g(s),E(Ys.$$.fragment,s),Ka=g(s),ws=i(s,"P",{});var ma=p(ws);Ne=l(ma,"Ao adicionar os tokens especiais "),wa=i(ma,"CODE",{});var Vt=p(wa);Be=l(Vt,"[CLS]"),Vt.forEach(e),Me=l(ma," e "),xa=i(ma,"CODE",{});var 
Ht=p(xa);Ie=l(Ht,"[SEP]"),Ht.forEach(e),Re=l(ma," e a tokeniza\xE7\xE3o de subpalavras uma incompatibilidade \xE9 gerada entre a entrada e os r\xF3tulos. Uma \xFAnica palavra correspondente a um \xFAnico r\xF3tulo pode ser dividida em duas subpalavras. Voc\xEA precisar\xE1 realinhar os tokens e os r\xF3tulos da seguinte forma:"),ma.forEach(e),Za=g(s),xs=i(s,"OL",{});var ua=p(xs);Ks=i(ua,"LI",{});var de=p(Ks);Le=l(de,"Mapeie todos os tokens para a palavra correspondente com o m\xE9todo "),Zs=i(de,"A",{href:!0,rel:!0});var Yt=p(Zs);Ea=i(Yt,"CODE",{});var Kt=p(Ea);Ue=l(Kt,"word_ids"),Kt.forEach(e),Yt.forEach(e),We=l(de,"."),de.forEach(e),Ve=g(ua),js=i(ua,"LI",{});var Rs=p(js);He=l(Rs,"Atribuindo o r\xF3tulo "),qa=i(Rs,"CODE",{});var Zt=p(qa);Ye=l(Zt,"-100"),Zt.forEach(e),Ke=l(Rs," aos tokens especiais "),za=i(Rs,"CODE",{});var Gt=p(za);Ze=l(Gt,"[CLS]"),Gt.forEach(e),Ge=l(Rs," e "),Ca=i(Rs,"CODE",{});var Jt=p(Ca);Je=l(Jt,"[SEP]"),Jt.forEach(e),Qe=l(Rs," para que a fun\xE7\xE3o de loss do PyTorch ignore eles."),Rs.forEach(e),Xe=g(ua),Gs=i(ua,"LI",{});var me=p(Gs);st=l(me,"Rotular apenas o primeiro token de uma determinada palavra. Atribuindo "),Ta=i(me,"CODE",{});var Qt=p(Ta);at=l(Qt,"-100"),Qt.forEach(e),et=l(me," a outros subtokens da mesma palavra."),me.forEach(e),ua.forEach(e),Ga=g(s),ia=i(s,"P",{});var Xt=p(ia);tt=l(Xt,"Aqui est\xE1 como voc\xEA pode criar uma fun\xE7\xE3o para realinhar os tokens e r\xF3tulos e truncar sequ\xEAncias para n\xE3o serem maiores que o comprimento m\xE1ximo de entrada do DistilBERT:"),Xt.forEach(e),Ja=g(s),E(Js.$$.fragment,s),Qa=g(s),gs=i(s,"P",{});var Ls=p(gs);ot=l(Ls,"Use a fun\xE7\xE3o "),Qs=i(Ls,"A",{href:!0,rel:!0});var so=p(Qs);Da=i(so,"CODE",{});var ao=p(Da);nt=l(ao,"map"),ao.forEach(e),so.forEach(e),lt=l(Ls," do \u{1F917} Datasets para tokenizar e alinhar os r\xF3tulos em todo o conjunto de dados. 
Voc\xEA pode acelerar a fun\xE7\xE3o "),Aa=i(Ls,"CODE",{});var eo=p(Aa);rt=l(eo,"map"),eo.forEach(e),it=l(Ls," configurando "),ya=i(Ls,"CODE",{});var to=p(ya);pt=l(to,"batched=True"),to.forEach(e),ct=l(Ls," para processar v\xE1rios elementos do conjunto de dados de uma s\xF3 vez:"),Ls.forEach(e),Xa=g(s),E(Xs.$$.fragment,s),se=g(s),rs=i(s,"P",{});var Es=p(rs);dt=l(Es,"Use o "),Pa=i(Es,"CODE",{});var oo=p(Pa);mt=l(oo,"DataCollatorForTokenClassification"),oo.forEach(e),ut=l(Es," para criar um batch de exemplos. Ele tamb\xE9m "),Oa=i(Es,"EM",{});var no=p(Oa);ft=l(no,"preencher\xE1 dinamicamente"),no.forEach(e),ht=l(Es," seu texto e r\xF3tulos para o comprimento do elemento mais longo em seu batch, para que tenham um comprimento uniforme. Embora seja poss\xEDvel preencher seu texto na fun\xE7\xE3o "),Fa=i(Es,"CODE",{});var lo=p(Fa);_t=l(lo,"tokenizer"),lo.forEach(e),gt=l(Es," configurando "),Sa=i(Es,"CODE",{});var ro=p(Sa);$t=l(ro,"padding=True"),ro.forEach(e),jt=l(Es,", o preenchimento din\xE2mico \xE9 mais eficiente."),Es.forEach(e),ae=g(s),E(Ns.$$.fragment,s),ee=g(s),Ts=i(s,"H2",{class:!0});var ue=p(Ts);Bs=i(ue,"A",{id:!0,class:!0,href:!0});var io=p(Bs);Na=i(io,"SPAN",{});var po=p(Na);E(sa.$$.fragment,po),po.forEach(e),io.forEach(e),kt=g(ue),Ba=i(ue,"SPAN",{});var co=p(Ba);vt=l(co,"Treinamento"),co.forEach(e),ue.forEach(e),te=g(s),E(Ms.$$.fragment,s),oe=g(s),E(Is.$$.fragment,s),this.h()},h(){k(t,"name","hf:doc:metadata"),k(t,"content",JSON.stringify(Po)),k(f,"id","classificao-de-tokens"),k(f,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),k(f,"href","#classificao-de-tokens"),k(o,"class","relative group"),k(U,"href","https://huggingface.co/distilbert-base-uncased"),k(U,"rel","nofollow"),k(S,"href","https://huggingface.co/datasets/wnut_17"),k(S,"rel","nofollow"),k(K,"id","carregando-o-conjunto-de-dados-wnut-17"),k(K,"class","header-link block 
pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),k(K,"href","#carregando-o-conjunto-de-dados-wnut-17"),k(B,"class","relative group"),k(Os,"id","prprocessamento"),k(Os,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),k(Os,"href","#prprocessamento"),k(Cs,"class","relative group"),k(Zs,"href","https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.word_ids"),k(Zs,"rel","nofollow"),k(Qs,"href","https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map"),k(Qs,"rel","nofollow"),k(Bs,"id","treinamento"),k(Bs,"class","header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full"),k(Bs,"href","#treinamento"),k(Ts,"class","relative 
group")},m(s,m){a(document.head,t),d(s,u,m),d(s,o,m),a(o,f),a(f,j),q($,j,null),a(o,v),a(o,D),a(D,A),d(s,w,m),q(O,s,m),d(s,L,m),d(s,G,m),a(G,N),d(s,Y,m),d(s,F,m),a(F,ns),a(F,U),a(U,$s),a(F,us),a(F,S),a(S,fs),a(F,W),d(s,hs,m),q(V,s,m),d(s,_s,m),d(s,B,m),a(B,K),a(K,I),q(J,I,null),a(B,Q),a(B,ls),a(ls,R),d(s,es,m),d(s,h,m),a(h,y),d(s,ts,m),q(H,s,m),d(s,X,m),d(s,M,m),a(M,ks),d(s,Z,m),q(ss,s,m),d(s,is,m),d(s,os,m),a(os,Ds),a(os,ps),a(ps,As),a(os,c),d(s,b,m),q(cs,s,m),d(s,Us,m),d(s,ds,m),a(ds,ta),a(ds,ys),a(ys,Ps),a(ds,oa),a(ds,vs),a(vs,na),a(ds,la),d(s,qs,m),d(s,ms,m),a(ms,zs),a(zs,fa),a(fa,ge),a(zs,$e),a(ms,je),a(ms,bs),a(bs,ha),a(ha,ke),a(bs,ve),a(bs,_a),a(_a,be),a(bs,we),a(bs,ga),a(ga,xe),a(bs,Ee),a(ms,qe),a(ms,ra),a(ra,$a),a($a,ze),a(ra,Ce),d(s,La,m),d(s,Cs,m),a(Cs,Os),a(Os,ja),q(Ws,ja,null),a(Cs,Te),a(Cs,ka),a(ka,De),d(s,Ua,m),q(Vs,s,m),d(s,Wa,m),d(s,Fs,m),a(Fs,Ae),a(Fs,va),a(va,ye),a(Fs,Pe),d(s,Va,m),q(Hs,s,m),d(s,Ha,m),d(s,Ss,m),a(Ss,Oe),a(Ss,ba),a(ba,Fe),a(Ss,Se),d(s,Ya,m),q(Ys,s,m),d(s,Ka,m),d(s,ws,m),a(ws,Ne),a(ws,wa),a(wa,Be),a(ws,Me),a(ws,xa),a(xa,Ie),a(ws,Re),d(s,Za,m),d(s,xs,m),a(xs,Ks),a(Ks,Le),a(Ks,Zs),a(Zs,Ea),a(Ea,Ue),a(Ks,We),a(xs,Ve),a(xs,js),a(js,He),a(js,qa),a(qa,Ye),a(js,Ke),a(js,za),a(za,Ze),a(js,Ge),a(js,Ca),a(Ca,Je),a(js,Qe),a(xs,Xe),a(xs,Gs),a(Gs,st),a(Gs,Ta),a(Ta,at),a(Gs,et),d(s,Ga,m),d(s,ia,m),a(ia,tt),d(s,Ja,m),q(Js,s,m),d(s,Qa,m),d(s,gs,m),a(gs,ot),a(gs,Qs),a(Qs,Da),a(Da,nt),a(gs,lt),a(gs,Aa),a(Aa,rt),a(gs,it),a(gs,ya),a(ya,pt),a(gs,ct),d(s,Xa,m),q(Xs,s,m),d(s,se,m),d(s,rs,m),a(rs,dt),a(rs,Pa),a(Pa,mt),a(rs,ut),a(rs,Oa),a(Oa,ft),a(rs,ht),a(rs,Fa),a(Fa,_t),a(rs,gt),a(rs,Sa),a(Sa,$t),a(rs,jt),d(s,ae,m),q(Ns,s,m),d(s,ee,m),d(s,Ts,m),a(Ts,Bs),a(Bs,Na),q(sa,Na,null),a(Ts,kt),a(Ts,Ba),a(Ba,vt),d(s,te,m),q(Ms,s,m),d(s,oe,m),q(Is,s,m),ne=!0},p(s,[m]){const aa={};m&2&&(aa.$$scope={dirty:m,ctx:s}),V.$set(aa);const Ma={};m&2&&(Ma.$$scope={dirty:m,ctx:s}),Ns.$set(Ma);const Ia={};m&2&&(Ia.$$scope={dirty:m,ctx:s}),Ms.$set(Ia);const 
Ra={};m&2&&(Ra.$$scope={dirty:m,ctx:s}),Is.$set(Ra)},i(s){ne||(z($.$$.fragment,s),z(O.$$.fragment,s),z(V.$$.fragment,s),z(J.$$.fragment,s),z(H.$$.fragment,s),z(ss.$$.fragment,s),z(cs.$$.fragment,s),z(Ws.$$.fragment,s),z(Vs.$$.fragment,s),z(Hs.$$.fragment,s),z(Ys.$$.fragment,s),z(Js.$$.fragment,s),z(Xs.$$.fragment,s),z(Ns.$$.fragment,s),z(sa.$$.fragment,s),z(Ms.$$.fragment,s),z(Is.$$.fragment,s),ne=!0)},o(s){C($.$$.fragment,s),C(O.$$.fragment,s),C(V.$$.fragment,s),C(J.$$.fragment,s),C(H.$$.fragment,s),C(ss.$$.fragment,s),C(cs.$$.fragment,s),C(Ws.$$.fragment,s),C(Vs.$$.fragment,s),C(Hs.$$.fragment,s),C(Ys.$$.fragment,s),C(Js.$$.fragment,s),C(Xs.$$.fragment,s),C(Ns.$$.fragment,s),C(sa.$$.fragment,s),C(Ms.$$.fragment,s),C(Is.$$.fragment,s),ne=!1},d(s){e(t),s&&e(u),s&&e(o),T($),s&&e(w),T(O,s),s&&e(L),s&&e(G),s&&e(Y),s&&e(F),s&&e(hs),T(V,s),s&&e(_s),s&&e(B),T(J),s&&e(es),s&&e(h),s&&e(ts),T(H,s),s&&e(X),s&&e(M),s&&e(Z),T(ss,s),s&&e(is),s&&e(os),s&&e(b),T(cs,s),s&&e(Us),s&&e(ds),s&&e(qs),s&&e(ms),s&&e(La),s&&e(Cs),T(Ws),s&&e(Ua),T(Vs,s),s&&e(Wa),s&&e(Fs),s&&e(Va),T(Hs,s),s&&e(Ha),s&&e(Ss),s&&e(Ya),T(Ys,s),s&&e(Ka),s&&e(ws),s&&e(Za),s&&e(xs),s&&e(Ga),s&&e(ia),s&&e(Ja),T(Js,s),s&&e(Qa),s&&e(gs),s&&e(Xa),T(Xs,s),s&&e(se),s&&e(rs),s&&e(ae),T(Ns,s),s&&e(ee),s&&e(Ts),T(sa),s&&e(te),T(Ms,s),s&&e(oe),T(Is,s)}}}const Po={local:"classificao-de-tokens",sections:[{local:"carregando-o-conjunto-de-dados-wnut-17",title:"Carregando o conjunto de dados WNUT 17"},{local:"prprocessamento",title:"Pr\xE9-processamento"},{local:"treinamento",title:"Treinamento"}],title:"Classifica\xE7\xE3o de tokens"};function Oo(P){return jo(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Ro extends ho{constructor(t){super();_o(this,t,Oo,yo,go,{})}}export{Ro as default,Po as metadata};
489
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/tasks/sequence_classification.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;classificao-de-texto&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;carregue-o-conjunto-de-dados-imdb&quot;,&quot;title&quot;:&quot;Carregue o conjunto de dados IMDb&quot;},{&quot;local&quot;:&quot;prprocessamento-dos-dados&quot;,&quot;title&quot;:&quot;Pré-processamento dos dados&quot;},{&quot;local&quot;:&quot;train&quot;,&quot;title&quot;:&quot;Train&quot;}],&quot;title&quot;:&quot;Classificação de texto&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/tasks/sequence_classification.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="classificao-de-texto" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#classificao-de-texto"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Classificação de texto </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/leNG9fN9FQU" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>A classificação de texto é uma tarefa comum de NLP que atribui um rótulo ou classe a um texto. Existem muitas aplicações práticas de classificação de texto amplamente utilizadas em produção por algumas das maiores empresas da atualidade. 
Uma das formas mais populares de classificação de texto é a análise de sentimento, que atribui um rótulo como positivo, negativo ou neutro a um texto.</p> <p>Este guia mostrará como realizar o fine-tuning do <a href="https://huggingface.co/distilbert-base-uncased" rel="nofollow">DistilBERT</a> no conjunto de dados <a href="https://huggingface.co/datasets/imdb" rel="nofollow">IMDb</a> para determinar se a crítica de filme é positiva ou negativa.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Consulte a <a href="https://huggingface.co/tasks/text-classification" rel="nofollow">página de tarefas de classificação de texto</a> para obter mais informações sobre outras formas de classificação de texto e seus modelos, conjuntos de dados e métricas associados.</p></div> <h2 class="relative group"><a id="carregue-o-conjunto-de-dados-imdb" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#carregue-o-conjunto-de-dados-imdb"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>Carregue o conjunto de dados IMDb </span></h2> <p>Carregue o conjunto de dados IMDb utilizando a biblioteca 🤗 Datasets:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>imdb = load_dataset(<span class="hljs-string">&quot;imdb&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Em seguida, dê uma olhada em um exemplo:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" 
type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>imdb[<span class="hljs-string">&quot;test&quot;</span>][<span class="hljs-number">0</span>] { <span class="hljs-string">&quot;label&quot;</span>: <span class="hljs-number">0</span>, <span class="hljs-string">&quot;text&quot;</span>: <span class="hljs-string">&quot;I love sci-fi and am willing to put up with a lot. Sci-fi movies/TV are usually underfunded, under-appreciated and misunderstood. I tried to like this, I really did, but it is to good TV sci-fi as Babylon 5 is to Star Trek (the original). Silly prosthetics, cheap cardboard sets, stilted dialogues, CG that doesn&#x27;t match the background, and painfully one-dimensional characters cannot be overcome with a &#x27;sci-fi&#x27; setting. (I&#x27;m sure there are those of you out there who think Babylon 5 is good sci-fi TV. It&#x27;s not. It&#x27;s clichéd and uninspiring.) While US viewers might like emotion and character development, sci-fi is a genre that does not take itself seriously (cf. Star Trek). 
It may treat important issues, yet not as a serious philosophy. It&#x27;s really difficult to care about the characters here as they are not simply foolish, just missing a spark of life. Their actions and reactions are wooden and predictable, often painful to watch. The makers of Earth KNOW it&#x27;s rubbish as they have to always say \&quot;Gene Roddenberry&#x27;s Earth...\&quot; otherwise people would not continue watching. Roddenberry&#x27;s ashes must be turning in their orbit as this dull, cheap, poorly edited (watching it without advert breaks really brings this home) trudging Trabant of a show lumbers into space. Spoiler. So, kill off a main character. And then bring him back as another actor. Jeeez! Dallas all over again.&quot;</span>, }<!-- HTML_TAG_END --></pre></div> <p>Existem dois campos neste dataset:</p> <ul><li><code>text</code>: uma string contendo o texto da crítica do filme.</li> <li><code>label</code>: um valor que pode ser <code>0</code> para uma crítica negativa ou <code>1</code> para uma crítica positiva.</li></ul> <h2 class="relative group"><a id="prprocessamento-dos-dados" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#prprocessamento-dos-dados"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pré-processamento dos dados </span></h2> <p>Carregue o tokenizador do DistilBERT para processar o campo <code>text</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Crie uma função de pré-processamento para tokenizar o campo <code>text</code> e truncar as sequências para que não sejam maiores que o comprimento máximo de entrada do DistilBERT:</p> <div class="code-block relative"><div 
class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">preprocess_function</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> tokenizer(examples[<span class="hljs-string">&quot;text&quot;</span>], truncation=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use a função <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map" rel="nofollow"><code>map</code></a> do 🤗 Datasets para aplicar a função de pré-processamento em todo o conjunto de dados. 
Você pode acelerar a função <code>map</code> definindo <code>batched=True</code> para processar vários elementos do conjunto de dados de uma só vez:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->tokenized_imdb = imdb.<span class="hljs-built_in">map</span>(preprocess_function, batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use o <code>DataCollatorWithPadding</code> para criar um batch de exemplos. Ele também <em>preencherá dinamicamente</em> seu texto até o comprimento do elemento mais longo em seu batch, para que os exemplos do batch tenham um comprimento uniforme. 
Embora seja possível preencher seu texto com a função <code>tokenizer</code> definindo <code>padding=True</code>, o preenchimento dinâmico utilizando um data collator é mais eficiente.</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 
1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorWithPadding(tokenizer=tokenizer)<!-- HTML_TAG_END --></pre></div></div></div> <div 
class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 
6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorWithPadding <span class="hljs-meta">&gt;&gt;&gt; 
</span>data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <h2 class="relative group"><a id="train" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#train"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Train </span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path 
d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Carregue o DistilBERT com <code>AutoModelForSequenceClassification</code> junto com o número de rótulos esperados:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition 
duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Se você não estiver familiarizado com o fine-tuning de um modelo com o <code>Trainer</code>, dê uma olhada no tutorial básico <a href="../training#finetune-with-trainer">aqui</a>!</p></div> <p>Nesse ponto, restam apenas três passos:</p> <ol><li>Definir seus hiperparâmetros de treinamento em 
<code>TrainingArguments</code>.</li> <li>Passar os argumentos de treinamento para o <code>Trainer</code> junto com o modelo, conjunto de dados, tokenizador e o data collator.</li> <li>Chamar a função <code>train()</code> para executar o fine-tuning do seu modelo.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... 
</span> num_train_epochs=<span class="hljs-number">5</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_imdb[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>O <code>Trainer</code> aplicará o preenchimento dinâmico por padrão quando você definir o argumento <code>tokenizer</code> dele. 
Nesse caso, você não precisa especificar um data collator explicitamente.</p></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 
3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Para executar o fine-tuning de um modelo no TensorFlow, comece convertendo seu conjunto de dados para o formato <code>tf.data.Dataset</code> com <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset" rel="nofollow"><code>to_tf_dataset</code></a>. 
Nessa execução você deverá especificar as entradas e rótulos (no parâmetro <code>columns</code>), se deseja embaralhar o conjunto de dados, o tamanho do batch e o data collator:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = tokenized_imdb[<span class="hljs-string">&quot;test&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;label&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Se você não estiver familiarizado com o fine-tuning de um modelo com o Keras, dê uma olhada no tutorial básico <a href="training#finetune-with-keras">aqui</a>!</p></div> <p>Configure o otimizador e alguns hiperparâmetros de treinamento:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight 
rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_epochs = <span class="hljs-number">5</span> <span class="hljs-meta">&gt;&gt;&gt; </span>batches_per_epoch = <span class="hljs-built_in">len</span>(tokenized_imdb[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size <span class="hljs-meta">&gt;&gt;&gt; </span>total_train_steps = <span class="hljs-built_in">int</span>(batches_per_epoch * num_epochs) <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, schedule = create_optimizer(init_lr=<span class="hljs-number">2e-5</span>, num_warmup_steps=<span class="hljs-number">0</span>, num_train_steps=total_train_steps)<!-- HTML_TAG_END --></pre></div> <p>Carregue o DistilBERT com <code>TFAutoModelForSequenceClassification</code> junto com o número de rótulos esperados:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 
32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure o modelo para treinamento com o método <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Chame o método <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> para executar o fine-tuning do modelo:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform 
-translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Para obter um exemplo mais aprofundado de como executar o fine-tuning de um modelo para classificação de texto, dê uma olhada nesse <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb" rel="nofollow">notebook utilizando PyTorch</a> ou nesse <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb" rel="nofollow">notebook utilizando TensorFlow</a>.</p></div> <script type="module" data-hydrate="gb80x3"> import { start } from "/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="gb80x3"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/pt","assets":"/docs/transformers/pr_18789/pt"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/pt/_app/pages/tasks/sequence_classification.mdx-hf-doc-builder.js") ], params: {} } }); </script>
490
0
hf_public_repos/doc-build-dev/transformers/pr_18789/pt
hf_public_repos/doc-build-dev/transformers/pr_18789/pt/tasks/token_classification.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;classificao-de-tokens&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;carregando-o-conjunto-de-dados-wnut-17&quot;,&quot;title&quot;:&quot;Carregando o conjunto de dados WNUT 17&quot;},{&quot;local&quot;:&quot;prprocessamento&quot;,&quot;title&quot;:&quot;Pré-processamento&quot;},{&quot;local&quot;:&quot;treinamento&quot;,&quot;title&quot;:&quot;Treinamento&quot;}],&quot;title&quot;:&quot;Classificação de tokens&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/pages/tasks/token_classification.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/pt/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="classificao-de-tokens" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" 
href="#classificao-de-tokens"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Classificação de tokens </span></h1> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/wVHdVlPScxA" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>A classificação de tokens atribui um rótulo a tokens individuais em uma frase. Uma das tarefas de classificação de tokens mais comuns é o Reconhecimento de Entidade Nomeada, também chamada de NER (sigla em inglês para Named Entity Recognition). 
O NER tenta encontrar um rótulo para cada entidade em uma frase, como uma pessoa, local ou organização.</p> <p>Este guia mostrará como realizar o fine-tuning do <a href="https://huggingface.co/distilbert-base-uncased" rel="nofollow">DistilBERT</a> no conjunto de dados <a href="https://huggingface.co/datasets/wnut_17" rel="nofollow">WNUT 17</a> para detectar novas entidades.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Consulte a <a href="https://huggingface.co/tasks/token-classification" rel="nofollow">página de tarefas de classificação de tokens</a> para obter mais informações sobre outras formas de classificação de tokens e seus modelos, conjuntos de dados e métricas associadas.</p></div> <h2 class="relative group"><a id="carregando-o-conjunto-de-dados-wnut-17" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#carregando-o-conjunto-de-dados-wnut-17"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Carregando o conjunto de dados WNUT 
17 </span></h2> <p>Carregue o conjunto de dados WNUT 17 da biblioteca 🤗 Datasets:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span>wnut = load_dataset(<span class="hljs-string">&quot;wnut_17&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>E dê uma olhada em um exemplo:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" 
focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>wnut[<span class="hljs-string">&quot;train&quot;</span>][<span class="hljs-number">0</span>] {<span class="hljs-string">&#x27;id&#x27;</span>: <span class="hljs-string">&#x27;0&#x27;</span>, <span class="hljs-string">&#x27;ner_tags&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">8</span>, <span class="hljs-number">8</span>, <span class="hljs-number">0</span>, <span class="hljs-number">7</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span 
class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;tokens&#x27;</span>: [<span class="hljs-string">&#x27;@paulwalk&#x27;</span>, <span class="hljs-string">&#x27;It&#x27;</span>, <span class="hljs-string">&quot;&#x27;s&quot;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;I&#x27;</span>, <span class="hljs-string">&quot;&#x27;m&quot;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Empire&#x27;</span>, <span class="hljs-string">&#x27;State&#x27;</span>, <span class="hljs-string">&#x27;Building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;ESB&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;Pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>] }<!-- HTML_TAG_END --></pre></div> <p>Cada número em <code>ner_tags</code> representa uma entidade. 
Converta o número em um rótulo para obter mais informações:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>label_list = wnut[<span class="hljs-string">&quot;train&quot;</span>].features[<span class="hljs-string">f&quot;ner_tags&quot;</span>].feature.names <span class="hljs-meta">&gt;&gt;&gt; </span>label_list [ <span class="hljs-string">&quot;O&quot;</span>, <span class="hljs-string">&quot;B-corporation&quot;</span>, <span class="hljs-string">&quot;I-corporation&quot;</span>, <span class="hljs-string">&quot;B-creative-work&quot;</span>, <span class="hljs-string">&quot;I-creative-work&quot;</span>, <span class="hljs-string">&quot;B-group&quot;</span>, <span class="hljs-string">&quot;I-group&quot;</span>, <span class="hljs-string">&quot;B-location&quot;</span>, <span 
class="hljs-string">&quot;I-location&quot;</span>, <span class="hljs-string">&quot;B-person&quot;</span>, <span class="hljs-string">&quot;I-person&quot;</span>, <span class="hljs-string">&quot;B-product&quot;</span>, <span class="hljs-string">&quot;I-product&quot;</span>, ]<!-- HTML_TAG_END --></pre></div> <p>O <code>ner_tag</code> descreve uma entidade, como uma organização, local ou pessoa. A letra que prefixa cada <code>ner_tag</code> indica a posição do token da entidade:</p> <ul><li><code>B-</code> indica o início de uma entidade.</li> <li><code>I-</code> indica que um token está contido dentro da mesma entidade (por exemplo, o token <code>State</code> pode fazer parte de uma entidade como <code>Empire State Building</code>).</li> <li><code>0</code> indica que o token não corresponde a nenhuma entidade.</li></ul> <h2 class="relative group"><a id="prprocessamento" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#prprocessamento"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pré-processamento </span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/iY2AZYdZAr0" title="YouTube video 
player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Carregue o tokenizer do DistilBERT para processar os <code>tokens</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Como a entrada já foi dividida em palavras, defina <code>is_split_into_words=True</code> para tokenizar as palavras em subpalavras:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button 
class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_input = tokenizer(example[<span class="hljs-string">&quot;tokens&quot;</span>], is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens = tokenizer.convert_ids_to_tokens(tokenized_input[<span class="hljs-string">&quot;input_ids&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span>tokens [<span class="hljs-string">&#x27;[CLS]&#x27;</span>, <span class="hljs-string">&#x27;@&#x27;</span>, <span class="hljs-string">&#x27;paul&#x27;</span>, <span class="hljs-string">&#x27;##walk&#x27;</span>, <span class="hljs-string">&#x27;it&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;s&#x27;</span>, <span class="hljs-string">&#x27;the&#x27;</span>, <span class="hljs-string">&#x27;view&#x27;</span>, <span 
class="hljs-string">&#x27;from&#x27;</span>, <span class="hljs-string">&#x27;where&#x27;</span>, <span class="hljs-string">&#x27;i&#x27;</span>, <span class="hljs-string">&quot;&#x27;&quot;</span>, <span class="hljs-string">&#x27;m&#x27;</span>, <span class="hljs-string">&#x27;living&#x27;</span>, <span class="hljs-string">&#x27;for&#x27;</span>, <span class="hljs-string">&#x27;two&#x27;</span>, <span class="hljs-string">&#x27;weeks&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;empire&#x27;</span>, <span class="hljs-string">&#x27;state&#x27;</span>, <span class="hljs-string">&#x27;building&#x27;</span>, <span class="hljs-string">&#x27;=&#x27;</span>, <span class="hljs-string">&#x27;es&#x27;</span>, <span class="hljs-string">&#x27;##b&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;pretty&#x27;</span>, <span class="hljs-string">&#x27;bad&#x27;</span>, <span class="hljs-string">&#x27;storm&#x27;</span>, <span class="hljs-string">&#x27;here&#x27;</span>, <span class="hljs-string">&#x27;last&#x27;</span>, <span class="hljs-string">&#x27;evening&#x27;</span>, <span class="hljs-string">&#x27;.&#x27;</span>, <span class="hljs-string">&#x27;[SEP]&#x27;</span>]<!-- HTML_TAG_END --></pre></div> <p>Ao adicionar os tokens especiais <code>[CLS]</code> e <code>[SEP]</code> e a tokenização de subpalavras uma incompatibilidade é gerada entre a entrada e os rótulos. Uma única palavra correspondente a um único rótulo pode ser dividida em duas subpalavras. 
Você precisará realinhar os tokens e os rótulos da seguinte forma:</p> <ol><li>Mapeie todos os tokens para a palavra correspondente com o método <a href="https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.word_ids" rel="nofollow"><code>word_ids</code></a>.</li> <li>Atribuindo o rótulo <code>-100</code> aos tokens especiais <code>[CLS]</code> e <code>[SEP]</code> para que a função de loss do PyTorch ignore eles.</li> <li>Rotular apenas o primeiro token de uma determinada palavra. Atribuindo <code>-100</code> a outros subtokens da mesma palavra.</li></ol> <p>Aqui está como você pode criar uma função para realinhar os tokens e rótulos e truncar sequências para não serem maiores que o comprimento máximo de entrada do DistilBERT:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">tokenize_and_align_labels</span>(<span class="hljs-params">examples</span>): <span class="hljs-meta">... </span> tokenized_inputs = tokenizer(examples[<span class="hljs-string">&quot;tokens&quot;</span>], truncation=<span class="hljs-literal">True</span>, is_split_into_words=<span class="hljs-literal">True</span>) <span class="hljs-meta">... </span> labels = [] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> i, label <span class="hljs-keyword">in</span> <span class="hljs-built_in">enumerate</span>(examples[<span class="hljs-string">f&quot;ner_tags&quot;</span>]): <span class="hljs-meta">... </span> word_ids = tokenized_inputs.word_ids(batch_index=i) <span class="hljs-comment"># Map tokens to their respective word.</span> <span class="hljs-meta">... </span> previous_word_idx = <span class="hljs-literal">None</span> <span class="hljs-meta">... </span> label_ids = [] <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> word_idx <span class="hljs-keyword">in</span> word_ids: <span class="hljs-comment"># Set the special tokens to -100.</span> <span class="hljs-meta">... </span> <span class="hljs-keyword">if</span> word_idx <span class="hljs-keyword">is</span> <span class="hljs-literal">None</span>: <span class="hljs-meta">... </span> label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> <span class="hljs-keyword">elif</span> word_idx != previous_word_idx: <span class="hljs-comment"># Only label the first token of a given word.</span> <span class="hljs-meta">... </span> label_ids.append(label[word_idx]) <span class="hljs-meta">... </span> <span class="hljs-keyword">else</span>: <span class="hljs-meta">... </span> label_ids.append(-<span class="hljs-number">100</span>) <span class="hljs-meta">... </span> previous_word_idx = word_idx <span class="hljs-meta">... 
</span> labels.append(label_ids) <span class="hljs-meta">... </span> tokenized_inputs[<span class="hljs-string">&quot;labels&quot;</span>] = labels <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> tokenized_inputs<!-- HTML_TAG_END --></pre></div> <p>Use a função <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map" rel="nofollow"><code>map</code></a> do 🤗 Datasets para tokenizar e alinhar os rótulos em todo o conjunto de dados. Você pode acelerar a função <code>map</code> configurando <code>batched=True</code> para processar vários elementos do conjunto de dados de uma só vez:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenized_wnut = wnut.<span class="hljs-built_in">map</span>(tokenize_and_align_labels, 
batched=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Use o <code>DataCollatorForTokenClassification</code> para criar um batch de exemplos. Ele também <em>preencherá dinamicamente</em> seu texto e rótulos para o comprimento do elemento mais longo em seu batch, para que tenham um comprimento uniforme. Embora seja possível preencher seu texto na função <code>tokenizer</code> configurando <code>padding=True</code>, o preenchimento dinâmico é mais eficiente.</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 
5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span 
class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 
6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> DataCollatorForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors=<span class="hljs-string">&quot;tf&quot;</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <h2 class="relative group"><a id="treinamento" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#treinamento"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Treinamento </span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Carregue o DistilBERT com o <code>AutoModelForTokenClassification</code> junto com o número de rótulos esperados:</p> 
<div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForTokenClassification, TrainingArguments, Trainer <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">14</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Se você não estiver familiarizado com o fine-tuning de um modelo com o <code>Trainer</code>, dê uma olhada no tutorial 
básico <a href="../training#finetune-with-trainer">aqui</a>!</p></div> <p>Nesse ponto, restam apenas três passos:</p> <ol><li>Definir seus hiperparâmetros de treinamento em <code>TrainingArguments</code>.</li> <li>Passar os argumentos de treinamento para o <code>Trainer</code> junto com o modelo, conjunto de dados, tokenizador e o data collator.</li> <li>Chamar a função <code>train()</code> para executar o fine-tuning do seu modelo.</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./results&quot;</span>, <span class="hljs-meta">... </span> evaluation_strategy=<span class="hljs-string">&quot;epoch&quot;</span>, <span class="hljs-meta">... 
</span> learning_rate=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> per_device_train_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> per_device_eval_batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> num_train_epochs=<span class="hljs-number">3</span>, <span class="hljs-meta">... </span> weight_decay=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>], <span class="hljs-meta">... </span> eval_dataset=tokenized_wnut[<span class="hljs-string">&quot;test&quot;</span>], <span class="hljs-meta">... </span> tokenizer=tokenizer, <span class="hljs-meta">... </span> data_collator=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>trainer.train()<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 
3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Para executar o fine-tuning de um modelo no TensorFlow, comece convertendo seu conjunto de dados para o formato <code>tf.data.Dataset</code> com <a href="https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset" rel="nofollow"><code>to_tf_dataset</code></a>. 
Nessa execução você deverá especificar as entradas e rótulos (no parâmetro <code>columns</code>), se deseja embaralhar o conjunto de dados, o tamanho do batch e o data collator:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_train_set = tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... 
</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_validation_set = tokenized_wnut[<span class="hljs-string">&quot;validation&quot;</span>].to_tf_dataset( <span class="hljs-meta">... </span> columns=[<span class="hljs-string">&quot;attention_mask&quot;</span>, <span class="hljs-string">&quot;input_ids&quot;</span>, <span class="hljs-string">&quot;labels&quot;</span>], <span class="hljs-meta">... </span> shuffle=<span class="hljs-literal">False</span>, <span class="hljs-meta">... </span> batch_size=<span class="hljs-number">16</span>, <span class="hljs-meta">... </span> collate_fn=data_collator, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Se você não estiver familiarizado com o fine-tuning de um modelo com o Keras, dê uma olhada no tutorial básico <a href="training#finetune-with-keras">aqui</a>!</p></div> <p>Configure o otimizador e alguns hiperparâmetros de treinamento:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 
leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> create_optimizer <span class="hljs-meta">&gt;&gt;&gt; </span>batch_size = <span class="hljs-number">16</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_epochs = <span class="hljs-number">3</span> <span class="hljs-meta">&gt;&gt;&gt; </span>num_train_steps = (<span class="hljs-built_in">len</span>(tokenized_wnut[<span class="hljs-string">&quot;train&quot;</span>]) // batch_size) * num_train_epochs <span class="hljs-meta">&gt;&gt;&gt; </span>optimizer, lr_schedule = create_optimizer( <span class="hljs-meta">... </span> init_lr=<span class="hljs-number">2e-5</span>, <span class="hljs-meta">... </span> num_train_steps=num_train_steps, <span class="hljs-meta">... </span> weight_decay_rate=<span class="hljs-number">0.01</span>, <span class="hljs-meta">... </span> num_warmup_steps=<span class="hljs-number">0</span>, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Carregue o DistilBERT com o <code>TFAutoModelForTokenClassification</code> junto com o número de rótulos esperados:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, num_labels=<span class="hljs-number">2</span>)<!-- HTML_TAG_END --></pre></div> <p>Configure o modelo para treinamento com o método <a href="https://keras.io/api/models/model_training_apis/#compile-method" rel="nofollow"><code>compile</code></a>:</p> <div class="code-block relative"><div 
class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>model.<span class="hljs-built_in">compile</span>(optimizer=optimizer)<!-- HTML_TAG_END --></pre></div> <p>Chame o método <a href="https://keras.io/api/models/model_training_apis/#fit-method" rel="nofollow"><code>fit</code></a> para executar o fine-tuning do modelo:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" 
focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=<span class="hljs-number">3</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Para obter um exemplo mais aprofundado de como executar o fine-tuning de um modelo para classificação de tokens, dê uma olhada nesse <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb" rel="nofollow">notebook utilizando PyTorch</a> ou nesse <a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb" rel="nofollow">notebook utilizando TensorFlow</a>.</p></div> <script type="module" data-hydrate="fudthf"> import { start } from "/docs/transformers/pr_18789/pt/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="fudthf"]').parentNode, paths: 
{"base":"/docs/transformers/pr_18789/pt","assets":"/docs/transformers/pr_18789/pt"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/pt/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/pt/_app/pages/tasks/token_classification.mdx-hf-doc-builder.js") ], params: {} } }); </script>
491
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/de/installation.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;installation&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;installation-mit-pip&quot;,&quot;title&quot;:&quot;Installation mit pip&quot;},{&quot;local&quot;:&quot;installation-aus-dem-code&quot;,&quot;title&quot;:&quot;Installation aus dem Code&quot;},{&quot;local&quot;:&quot;editierbare-installation&quot;,&quot;title&quot;:&quot;Editierbare Installation&quot;},{&quot;local&quot;:&quot;installation-mit-conda&quot;,&quot;title&quot;:&quot;Installation mit conda&quot;},{&quot;local&quot;:&quot;cache-einrichtung&quot;,&quot;title&quot;:&quot;Cache Einrichtung&quot;},{&quot;local&quot;:&quot;offline-modus&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;abrufen-von-modellen-und-tokenizern-zur-offlineverwendung&quot;,&quot;title&quot;:&quot;Abrufen von Modellen und Tokenizern zur Offline-Verwendung&quot;}],&quot;title&quot;:&quot;Offline Modus&quot;}],&quot;title&quot;:&quot;Installation&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/installation.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" 
href="/docs/transformers/pr_18789/de/_app/chunks/CodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="installation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#installation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Installation </span></h1> <p>Installieren Sie 🤗 Transformers für die Deep-Learning-Bibliothek, mit der Sie arbeiten, richten Sie Ihren Cache ein und konfigurieren Sie 🤗 Transformers optional für den Offline-Betrieb.</p> <p>🤗 Transformers wurde unter Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, und Flax getestet. 
Folgen Sie den Installationsanweisungen unten für die von Ihnen verwendete Deep-Learning-Bibliothek:</p> <ul><li><a href="https://pytorch.org/get-started/locally/" rel="nofollow">PyTorch</a> installation instructions.</li> <li><a href="https://www.tensorflow.org/install/pip" rel="nofollow">TensorFlow 2.0</a> installation instructions.</li> <li><a href="https://flax.readthedocs.io/en/latest/" rel="nofollow">Flax</a> installation instructions.</li></ul> <h2 class="relative group"><a id="installation-mit-pip" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#installation-mit-pip"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Installation mit pip </span></h2> <p>Sie sollten 🤗 Transformers in einer <a href="https://docs.python.org/3/library/venv.html" rel="nofollow">virtuellen Umgebung</a> installieren. Wenn Sie mit virtuellen Python-Umgebungen nicht vertraut sind, werfen Sie einen Blick auf diese <a href="https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/" rel="nofollow">Anleitung</a>. 
Eine virtuelle Umgebung macht es einfacher, verschiedene Projekte zu verwalten und Kompatibilitätsprobleme zwischen Abhängigkeiten zu vermeiden.</p> <p>Beginnen wir mit der Erstellung einer virtuellen Umgebung in Ihrem Projektverzeichnis:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m venv .<span class="hljs-built_in">env</span><!-- HTML_TAG_END --></pre></div> <p>Aktivieren wir die virtuelle Umgebung. 
Unter Linux und MacOs:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">source</span> .<span class="hljs-built_in">env</span>/bin/activate<!-- HTML_TAG_END --></pre></div> <p>Aktivieren wir die virtuelle Umgebung unter Windows</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->.<span class="hljs-built_in">env</span>/Scripts/activate<!-- HTML_TAG_END --></pre></div> <p>Jetzt können wir die 🤗 Transformers mit dem folgenden Befehl installieren:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install 
transformers<!-- HTML_TAG_END --></pre></div> <p>Bei reiner CPU-Unterstützung können wir 🤗 Transformers und eine Deep-Learning-Bibliothek bequem in einer Zeile installieren. Installieren wir zum Beispiel 🤗 Transformers und PyTorch mit:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[torch]<!-- HTML_TAG_END --></pre></div> <p>🤗 Transformers und TensorFlow 2.0:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[tf-cpu]<!-- HTML_TAG_END --></pre></div> <p>🤗 Transformers und Flax:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; 
"></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install transformers[flax]<!-- HTML_TAG_END --></pre></div> <p>Überprüfen wir abschließend, ob 🤗 Transformers ordnungsgemäß installiert wurde, indem wir den folgenden Befehl ausführen. Es wird ein vortrainiertes Modell heruntergeladen:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -c <span class="hljs-string">&quot;from transformers import pipeline; print(pipeline(&#x27;sentiment-analysis&#x27;)(&#x27;we love you&#x27;))&quot;</span><!-- HTML_TAG_END --></pre></div> <p>Dann wird die Kategorie und die Wahrscheinlichkeit ausgegeben:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out 
opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->[{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: 0.9998704791069031}]<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="installation-aus-dem-code" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#installation-aus-dem-code"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 
56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Installation aus dem Code </span></h2> <p>Installieren wir 🤗 Transformers aus dem Quellcode mit dem folgenden Befehl:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install git+https://github.com/huggingface/transformers<!-- HTML_TAG_END --></pre></div> <p>Dieser Befehl installiert die aktuelle <code>main</code> Version und nicht die neueste <code>stable</code> Version. Die <code>main</code>-Version ist nützlich, um mit den neuesten Entwicklungen Schritt zu halten. 
Zum Beispiel, wenn ein Fehler seit der letzten offiziellen Version behoben wurde, aber eine neue Version noch nicht veröffentlicht wurde. Das bedeutet jedoch, dass die “Hauptversion” nicht immer stabil ist. Wir bemühen uns, die Hauptversion einsatzbereit zu halten, und die meisten Probleme werden normalerweise innerhalb weniger Stunden oder eines Tages behoben. Wenn Sie auf ein Problem stoßen, öffnen Sie bitte ein [Issue] (<a href="https://github.com/huggingface/transformers/issues" rel="nofollow">https://github.com/huggingface/transformers/issues</a>), damit wir es noch schneller beheben können!</p> <p>Überprüfen wir, ob 🤗 Transformers richtig installiert wurde, indem Sie den folgenden Befehl ausführen:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -c <span class="hljs-string">&quot;from transformers import pipeline; 
print(pipeline(&#x27;sentiment-analysis&#x27;)(&#x27;I love you&#x27;))&quot;</span><!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="editierbare-installation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#editierbare-installation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Editierbare Installation </span></h2> <p>Sie benötigen eine bearbeitbare Installation, wenn Sie:</p> <ul><li>die “Haupt”-Version des Quellcodes verwenden möchten.</li> <li>Zu 🤗 Transformers beitragen und Änderungen am Code testen wollen.</li></ul> <p>Klonen Sie das Repository und installieren 🤗 Transformers mit den folgenden Befehlen:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->git <span class="hljs-built_in">clone</span> https://github.com/huggingface/transformers.git <span class="hljs-built_in">cd</span> transformers pip install -e .<!-- HTML_TAG_END --></pre></div> <p>Diese Befehle verknüpfen den Ordner, in den Sie das Repository geklont haben, mit den Pfaden Ihrer Python-Bibliotheken. Python wird nun in dem Ordner suchen, in den Sie geklont haben, zusätzlich zu den normalen Bibliothekspfaden. 
Wenn zum Beispiel Ihre Python-Pakete normalerweise in <code>~/anaconda3/envs/main/lib/python3.7/site-packages/</code> installiert sind, wird Python auch den Ordner durchsuchen, in den Sie geklont haben: <code>~/transformers/</code>.</p> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>Sie müssen den Ordner <code>transformers</code> behalten, wenn Sie die Bibliothek weiter verwenden wollen.</p></div> <p>Jetzt können Sie Ihren Klon mit dem folgenden Befehl ganz einfach auf die neueste Version von 🤗 Transformers aktualisieren:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-built_in">cd</span> ~/transformers/ git 
pull<!-- HTML_TAG_END --></pre></div> <p>Ihre Python-Umgebung wird beim nächsten Ausführen die <code>main</code>-Version von 🤗 Transformers finden.</p> <h2 class="relative group"><a id="installation-mit-conda" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#installation-mit-conda"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Installation mit conda </span></h2> <p>Installation von dem conda Kanal <code>huggingface</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" 
width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->conda install -c huggingface transformers<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="cache-einrichtung" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#cache-einrichtung"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Cache Einrichtung </span></h2> <p>Vorgefertigte Modelle werden heruntergeladen und lokal zwischengespeichert unter: <code>~/.cache/huggingface/hub</code>. Dies ist das Standardverzeichnis, das durch die Shell-Umgebungsvariable “TRANSFORMERS_CACHE” vorgegeben ist. Unter Windows wird das Standardverzeichnis durch <code>C:\Benutzer\Benutzername\.cache\huggingface\hub</code> angegeben. 
Sie können die unten aufgeführten Shell-Umgebungsvariablen - in der Reihenfolge ihrer Priorität - ändern, um ein anderes Cache-Verzeichnis anzugeben:</p> <ol><li>Shell-Umgebungsvariable (Standard): <code>HUGGINGFACE_HUB_CACHE</code> oder <code>TRANSFORMERS_CACHE</code>.</li> <li>Shell-Umgebungsvariable: <code>HF_HOME</code>.</li> <li>Shell-Umgebungsvariable: <code>XDG_CACHE_HOME</code> + <code>/huggingface</code>.</li></ol> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Transformers verwendet die Shell-Umgebungsvariablen <code>PYTORCH_TRANSFORMERS_CACHE</code> oder <code>PYTORCH_PRETRAINED_BERT_CACHE</code>, wenn Sie von einer früheren Iteration dieser Bibliothek kommen und diese Umgebungsvariablen gesetzt haben, sofern Sie nicht die Shell-Umgebungsvariable <code>TRANSFORMERS_CACHE</code> angeben.</p></div> <h2 class="relative group"><a id="offline-modus" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#offline-modus"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>Offline Modus </span></h2> <p>Transformers ist in der Lage, in einer Firewall- oder Offline-Umgebung zu laufen, indem es nur lokale Dateien verwendet. Setzen Sie die Umgebungsvariable <code>TRANSFORMERS_OFFLINE=1</code>, um dieses Verhalten zu aktivieren.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Fügen sie <a href="https://huggingface.co/docs/datasets/" rel="nofollow">🤗 Datasets</a> zu Ihrem Offline-Trainingsworkflow hinzufügen, indem Sie die Umgebungsvariable <code>HF_DATASETS_OFFLINE=1</code> setzen.</p></div> <p>So würden Sie beispielsweise ein Programm in einem normalen Netzwerk mit einer Firewall für externe Instanzen mit dem folgenden Befehl ausführen:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" 
style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...<!-- HTML_TAG_END --></pre></div> <p>Führen Sie das gleiche Programm in einer Offline-Instanz mit aus:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...<!-- HTML_TAG_END --></pre></div> <p>Das Skript sollte nun laufen, ohne sich aufzuhängen oder eine Zeitüberschreitung abzuwarten, da es weiß, dass es nur nach lokalen Dateien suchen soll.</p> <h3 class="relative group"><a 
id="abrufen-von-modellen-und-tokenizern-zur-offlineverwendung" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#abrufen-von-modellen-und-tokenizern-zur-offlineverwendung"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Abrufen von Modellen und Tokenizern zur Offline-Verwendung </span></h3> <p>Eine andere Möglichkeit, 🤗 Transformers offline zu verwenden, besteht darin, die Dateien im Voraus herunterzuladen und dann auf ihren lokalen Pfad zu verweisen, wenn Sie sie offline verwenden müssen. 
Es gibt drei Möglichkeiten, dies zu tun:</p> <ul><li><p>Laden Sie eine Datei über die Benutzeroberfläche des <a href="https://huggingface.co/models" rel="nofollow">Model Hub</a> herunter, indem Sie auf das ↓-Symbol klicken.</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png" alt="download-icon"></p></li> <li><p>Verwenden Sie den <code>PreTrainedModel.from_pretrained()</code>- und <code>PreTrainedModel.save_pretrained()</code>-Workflow:</p> <ol><li><p>Laden Sie Ihre Dateien im Voraus mit <code>PreTrainedModel.from_pretrained()</code> herunter:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSeq2SeqLM <span class="hljs-meta">&gt;&gt;&gt; 
</span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSeq2SeqLM.from_pretrained(<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>)<!-- HTML_TAG_END --></pre></div></li> <li><p>Speichern Sie Ihre Dateien in einem bestimmten Verzeichnis mit <code>PreTrainedModel.save_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model.save_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)<!-- HTML_TAG_END --></pre></div></li> <li><p>Wenn Sie nun offline sind, laden Sie Ihre Dateien 
mit <code>PreTrainedModel.from_pretrained()</code> aus dem bestimmten Verzeichnis:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)<!-- HTML_TAG_END --></pre></div></li></ol></li> <li><p>Programmatisches Herunterladen von Dateien mit der <a href="https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub" rel="nofollow">huggingface_hub</a> Bibliothek:</p> <ol><li><p>Installieren Sie die “huggingface_hub”-Bibliothek in Ihrer virtuellen Umgebung:</p> <div class="code-block relative"><div class="absolute top-2.5 
right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->python -m pip install huggingface_hub<!-- HTML_TAG_END --></pre></div></li> <li><p>Verwenden Sie die Funktion <a href="https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub" rel="nofollow"><code>hf_hub_download</code></a>, um eine Datei in einen bestimmten Pfad herunterzuladen. 
Der folgende Befehl lädt zum Beispiel die Datei “config.json” aus dem Modell <a href="https://huggingface.co/bigscience/T0_3B" rel="nofollow">T0</a> in den gewünschten Pfad herunter:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> hf_hub_download <span class="hljs-meta">&gt;&gt;&gt; </span>hf_hub_download(repo_id=<span class="hljs-string">&quot;bigscience/T0_3B&quot;</span>, filename=<span class="hljs-string">&quot;config.json&quot;</span>, cache_dir=<span class="hljs-string">&quot;./your/path/bigscience_t0&quot;</span>)<!-- HTML_TAG_END --></pre></div></li></ol></li></ul> <p>Sobald Ihre Datei heruntergeladen und lokal zwischengespeichert ist, geben Sie den lokalen Pfad an, um sie zu laden und zu 
verwenden:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig <span class="hljs-meta">&gt;&gt;&gt; </span>config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;./your/path/bigscience_t0/config.json&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Weitere Informationen zum Herunterladen von Dateien, die auf dem Hub gespeichert sind, finden Sie im Abschnitt [Wie man Dateien vom Hub herunterlädt] (<a 
href="https://huggingface.co/docs/hub/how-to-downstream" rel="nofollow">https://huggingface.co/docs/hub/how-to-downstream</a>).</p></div> <script type="module" data-hydrate="1rxwjxr"> import { start } from "/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1rxwjxr"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/de","assets":"/docs/transformers/pr_18789/de"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/de/_app/pages/installation.mdx-hf-doc-builder.js") ], params: {} } }); </script>
492
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/de/_toctree.yml
- sections: - local: index title: 🤗 Transformers - local: quicktour title: Schnellstart - local: installation title: Installation title: Erste Schritte - sections: - local: pipeline_tutorial title: Pipelines für Inferenzen - local: autoclass_tutorial title: Laden von vortrainierten Instanzen mit einer AutoClass - local: preprocessing title: Vorverarbeiten - local: training title: Optimierung eines vortrainierten Modells - local: accelerate title: Verteiltes Training mit 🤗 Accelerate - local: model_sharing title: Ein Modell teilen title: Tutorials
493
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/de/autoclass_tutorial.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;vortrainierte-instanzen-mit-einer-autoclass-laden&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;autotokenizer&quot;,&quot;title&quot;:&quot;AutoTokenizer&quot;},{&quot;local&quot;:&quot;autofeatureextractor&quot;,&quot;title&quot;:&quot;AutoFeatureExtractor&quot;},{&quot;local&quot;:&quot;autoprocessor&quot;,&quot;title&quot;:&quot;AutoProcessor&quot;},{&quot;local&quot;:&quot;automodel&quot;,&quot;title&quot;:&quot;AutoModel&quot;}],&quot;title&quot;:&quot;Vortrainierte Instanzen mit einer AutoClass laden&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/autoclass_tutorial.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="vortrainierte-instanzen-mit-einer-autoclass-laden" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 
with-hover:right-full" href="#vortrainierte-instanzen-mit-einer-autoclass-laden"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Vortrainierte Instanzen mit einer AutoClass laden </span></h1> <p>Bei so vielen verschiedenen Transformer-Architekturen kann es eine Herausforderung sein, eine für Ihren Checkpoint zu erstellen. Als Teil der 🤗 Transformers Kernphilosophie, die Bibliothek leicht, einfach und flexibel nutzbar zu machen, leitet eine <code>AutoClass</code> automatisch die richtige Architektur aus einem gegebenen Checkpoint ab und lädt sie. Mit der Methode <code>from_pretrained()</code> kann man schnell ein vortrainiertes Modell für eine beliebige Architektur laden, so dass man keine Zeit und Ressourcen aufwenden muss, um ein Modell von Grund auf zu trainieren.
Die Erstellung dieser Art von Checkpoint-agnostischem Code bedeutet, dass Ihr Code, wenn er für einen Checkpoint funktioniert, auch mit einem anderen Checkpoint funktionieren wird - solange er für eine ähnliche Aufgabe trainiert wurde - selbst wenn die Architektur unterschiedlich ist.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Denken Sie daran, dass sich die Architektur auf das Skelett des Modells bezieht und die Checkpoints die Gewichte für eine bestimmte Architektur sind. Zum Beispiel ist <a href="https://huggingface.co/bert-base-uncased" rel="nofollow">BERT</a> eine Architektur, während <code>bert-base-uncased</code> ein Checkpoint ist. Modell ist ein allgemeiner Begriff, der entweder Architektur oder Prüfpunkt bedeuten kann.</p></div> <p>In dieser Anleitung lernen Sie, wie man:</p> <ul><li>Einen vortrainierten Tokenizer lädt.</li> <li>Einen vortrainierten Merkmalsextraktor lädt.</li> <li>Einen vortrainierten Prozessor lädt.</li> <li>Ein vortrainiertes Modell lädt.</li></ul> <h2 class="relative group"><a id="autotokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#autotokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 
0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutoTokenizer </span></h2> <p>Nahezu jede NLP-Aufgabe beginnt mit einem Tokenizer. Ein Tokenizer wandelt Ihre Eingabe in ein Format um, das vom Modell verarbeitet werden kann.</p> <p>Laden Sie einen Tokenizer mit <code>AutoTokenizer.from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;bert-base-uncased&quot;</span>)<!-- HTML_TAG_END 
--></pre></div> <p>Dann tokenisieren Sie Ihre Eingabe wie unten gezeigt:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>sequence = <span class="hljs-string">&quot;In a hole in the ground there lived a hobbit.&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(tokenizer(sequence)) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: [<span class="hljs-number">101</span>, <span class="hljs-number">1999</span>, <span class="hljs-number">1037</span>, <span class="hljs-number">4920</span>, <span class="hljs-number">1999</span>, <span class="hljs-number">1996</span>, <span class="hljs-number">2598</span>, <span class="hljs-number">2045</span>, <span class="hljs-number">2973</span>, <span class="hljs-number">1037</span>, <span 
class="hljs-number">7570</span>, <span class="hljs-number">10322</span>, <span class="hljs-number">4183</span>, <span class="hljs-number">1012</span>, <span class="hljs-number">102</span>], <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;attention_mask&#x27;</span>: [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]}<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="autofeatureextractor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#autofeatureextractor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutoFeatureExtractor </span></h2> <p>Für Audio- und Bildverarbeitungsaufgaben verarbeitet ein Merkmalsextraktor das Audiosignal oder Bild in das richtige Eingabeformat.</p> <p>Laden Sie einen Merkmalsextraktor mit <code>AutoFeatureExtractor.from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoFeatureExtractor <span class="hljs-meta">&gt;&gt;&gt; </span>feature_extractor = AutoFeatureExtractor.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition&quot;</span> <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="autoprocessor" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#autoprocessor"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutoProcessor </span></h2> <p>Multimodale Aufgaben erfordern einen Prozessor, der zwei Arten von Vorverarbeitungswerkzeugen kombiniert. 
Das Modell <a href="model_doc/layoutlmv2">LayoutLMV2</a> beispielsweise benötigt einen Feature-Extraktor für Bilder und einen Tokenizer für Text; ein Prozessor kombiniert beide.</p> <p>Laden Sie einen Prozessor mit <code>AutoProcessor.from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoProcessor <span class="hljs-meta">&gt;&gt;&gt; </span>processor = AutoProcessor.from_pretrained(<span class="hljs-string">&quot;microsoft/layoutlmv2-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="automodel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#automodel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutoModel </span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path 
d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Mit den <code>AutoModelFor</code>-Klassen können Sie schließlich ein vortrainiertes Modell für eine bestimmte Aufgabe laden (siehe <a href="model_doc/auto">hier</a> für eine vollständige Liste der verfügbaren Aufgaben). 
Laden Sie zum Beispiel ein Modell für die Sequenzklassifikation mit <code>AutoModelForSequenceClassification.from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Sie können denselben Prüfpunkt problemlos wiederverwenden, um eine Architektur für eine andere Aufgabe zu laden:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer 
focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"><p>Für PyTorch-Modelle verwendet die Methode <code>from_pretrained()</code> <code>torch.load()</code>, die intern <code>pickle</code> verwendet und als unsicher bekannt ist. Generell sollte man niemals ein Modell laden, das aus einer nicht vertrauenswürdigen Quelle stammen könnte, oder das manipuliert worden sein könnte. 
Dieses Sicherheitsrisiko wird für öffentliche Modelle, die auf dem Hugging Face Hub gehostet werden, teilweise gemildert, da diese bei jeder Übertragung <a href="https://huggingface.co/docs/hub/security-malware" rel="nofollow">auf Malware</a> gescannt werden. Siehe die <a href="https://huggingface.co/docs/hub/security" rel="nofollow">Hub-Dokumentation</a> für Best Practices wie <a href="https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg" rel="nofollow">signierte Commit-Verifizierung</a> mit GPG.</p> <p>TensorFlow- und Flax-Checkpoints sind nicht betroffen und können in PyTorch-Architekturen mit den Kwargs <code>from_tf</code> und <code>from_flax</code> für die Methode <code>from_pretrained</code> geladen werden, um dieses Problem zu umgehen.</p></div> <p>Im Allgemeinen empfehlen wir die Verwendung der Klasse “AutoTokenizer” und der Klasse “AutoModelFor”, um trainierte Instanzen von Modellen zu laden. Dadurch wird sichergestellt, dass Sie jedes Mal die richtige Architektur laden. 
Im nächsten [Tutorial] (Vorverarbeitung) erfahren Sie, wie Sie Ihren neu geladenen Tokenizer, Feature Extractor und Prozessor verwenden, um einen Datensatz für die Feinabstimmung vorzuverarbeiten.</p></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 
5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Mit den Klassen <code>TFAutoModelFor</code> schließlich können Sie ein vortrainiertes Modell für eine bestimmte Aufgabe laden (siehe <a href="model_doc/auto">hier</a> für eine vollständige Liste der verfügbaren Aufgaben). 
Laden Sie zum Beispiel ein Modell für die Sequenzklassifikation mit <code>TFAutoModelForSequenceClassification.from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Sie können denselben Prüfpunkt problemlos wiederverwenden, um eine Architektur für eine andere Aufgabe zu laden:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 
cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForTokenClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForTokenClassification.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Im Allgemeinen empfehlen wir, die Klasse “AutoTokenizer” und die Klasse “TFAutoModelFor” zu verwenden, um vortrainierte Instanzen von Modellen zu laden. Dadurch wird sichergestellt, dass Sie jedes Mal die richtige Architektur laden. 
Im nächsten [Tutorial] (Vorverarbeitung) erfahren Sie, wie Sie Ihren neu geladenen Tokenizer, Feature Extractor und Prozessor verwenden, um einen Datensatz für die Feinabstimmung vorzuverarbeiten.</p> </div></div> </div> <script type="module" data-hydrate="6zyfpl"> import { start } from "/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="6zyfpl"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/de","assets":"/docs/transformers/pr_18789/de"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/de/_app/pages/autoclass_tutorial.mdx-hf-doc-builder.js") ], params: {} } }); </script>
494
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/de/pipeline_tutorial.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;pipelines-fr-inferenzen&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;verwendung-von-pipelines&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;whlen-sie-ein-modell-und-einen-tokenizer&quot;,&quot;title&quot;:&quot;Wählen Sie ein Modell und einen Tokenizer&quot;}],&quot;title&quot;:&quot;Verwendung von Pipelines&quot;},{&quot;local&quot;:&quot;audiopipeline&quot;,&quot;title&quot;:&quot;Audio-Pipeline&quot;},{&quot;local&quot;:&quot;bildverarbeitungspipeline&quot;,&quot;title&quot;:&quot;Bildverarbeitungs-Pipeline&quot;},{&quot;local&quot;:&quot;multimodale-pipeline&quot;,&quot;title&quot;:&quot;Multimodale Pipeline&quot;}],&quot;title&quot;:&quot;Pipelines für Inferenzen&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/pipeline_tutorial.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/CodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="pipelines-fr-inferenzen" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipelines-fr-inferenzen"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipelines für Inferenzen </span></h1> <p>Die <code>pipeline()</code> macht es einfach, jedes beliebige Modell aus dem <a href="https://huggingface.co/models" rel="nofollow">Hub</a> für die Inferenz auf jede Sprache, Computer Vision, Sprache und multimodale Aufgaben zu verwenden. Selbst wenn Sie keine Erfahrung mit einer bestimmten Modalität haben oder nicht mit dem zugrundeliegenden Code hinter den Modellen vertraut sind, können Sie sie mit der <code>pipeline()</code> für Inferenzen verwenden! 
In diesem Beispiel lernen Sie, wie:</p> <ul><li>Eine <code>pipeline()</code> für Inferenz zu verwenden.</li> <li>Einen bestimmten Tokenizer oder ein bestimmtes Modell zu verwenden.</li> <li>Eine <code>pipeline()</code> für Audio-, Vision- und multimodale Aufgaben zu verwenden.</li></ul> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Eine vollständige Liste der unterstützten Aufgaben und verfügbaren Parameter finden Sie in der <code>pipeline()</code>-Dokumentation.</p></div> <h2 class="relative group"><a id="verwendung-von-pipelines" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#verwendung-von-pipelines"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Verwendung von Pipelines </span></h2> <p>Obwohl jede Aufgabe eine zugehörige <code>pipeline()</code> hat, ist es einfacher, die allgemeine <code>pipeline()</code>-Abstraktion zu verwenden, die alle aufgabenspezifischen Pipelines enthält. 
Die <code>pipeline()</code> lädt automatisch ein Standardmodell und eine Vorverarbeitungsklasse, die für Ihre Aufgabe inferenzfähig ist.</p> <ol><li>Beginnen Sie mit der Erstellung einer <code>pipeline()</code> und geben Sie eine Inferenzaufgabe an:</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>generator = pipeline(task=<span class="hljs-string">&quot;text-generation&quot;</span>)<!-- HTML_TAG_END --></pre></div> <ol start="2"><li>Übergeben Sie Ihren Eingabetext an die <code>pipeline()</code>:</li></ol> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm 
focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generator( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-comment"># doctest: +SKIP</span> [{<span class="hljs-string">&#x27;generated_text&#x27;</span>: <span class="hljs-string">&#x27;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Iron-priests at the door to the east, and thirteen for the Lord Kings at the end of the mountain&#x27;</span>}]<!-- HTML_TAG_END --></pre></div> <p>Wenn Sie mehr als eine Eingabe haben, übergeben Sie die Eingabe als Liste:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generator( <span class="hljs-meta">... </span> [ <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>, <span class="hljs-meta">... 
</span> <span class="hljs-string">&quot;Nine for Mortal Men, doomed to die, One for the Dark Lord on his dark throne&quot;</span>, <span class="hljs-meta">... </span> ] <span class="hljs-meta">... </span>) <span class="hljs-comment"># doctest: +SKIP</span><!-- HTML_TAG_END --></pre></div> <p>Alle zusätzlichen Parameter für Ihre Aufgabe können auch in die <code>pipeline()</code> aufgenommen werden. Die Aufgabe <code>Text-Generierung</code> hat eine <code>generate()</code>-Methode mit mehreren Parametern zur Steuerung der Ausgabe. Wenn Sie zum Beispiel mehr als eine Ausgabe erzeugen wollen, setzen Sie den Parameter <code>num_return_sequences</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generator( <span class="hljs-meta">... 
</span> <span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span>, <span class="hljs-meta">... </span> num_return_sequences=<span class="hljs-number">2</span>, <span class="hljs-meta">... </span>) <span class="hljs-comment"># doctest: +SKIP</span><!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="whlen-sie-ein-modell-und-einen-tokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#whlen-sie-ein-modell-und-einen-tokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Wählen Sie ein Modell und einen Tokenizer </span></h3> <p>Die <code>pipeline()</code> akzeptiert jedes Modell aus dem [Hub] (<a href="https://huggingface.co/models" rel="nofollow">https://huggingface.co/models</a>). Auf dem Hub gibt es Tags, mit denen Sie nach einem Modell filtern können, das Sie für Ihre Aufgabe verwenden möchten. Sobald Sie ein passendes Modell ausgewählt haben, laden Sie es mit der entsprechenden <code>AutoModelFor</code> und <code>AutoTokenizer</code> Klasse. 
Laden Sie zum Beispiel die Klasse <code>AutoModelForCausalLM</code> für eine kausale Sprachmodellierungsaufgabe:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForCausalLM <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">&quot;distilgpt2&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Erstellen Sie eine <code>pipeline()</code> für Ihre Aufgabe, und geben Sie das Modell und den Tokenizer an, die Sie geladen haben:</p> <div class="code-block relative"><div 
class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>generator = pipeline(task=<span class="hljs-string">&quot;text-generation&quot;</span>, model=model, tokenizer=tokenizer)<!-- HTML_TAG_END --></pre></div> <p>Übergeben Sie Ihren Eingabetext an die <code>pipeline()</code> , um einen Text zu erzeugen:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>generator( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone&quot;</span> <span class="hljs-meta">... 
</span>) <span class="hljs-comment"># doctest: +SKIP</span> [{<span class="hljs-string">&#x27;generated_text&#x27;</span>: <span class="hljs-string">&#x27;Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Dragon-lords (for them to rule in a world ruled by their rulers, and all who live within the realm&#x27;</span>}]<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="audiopipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#audiopipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Audio-Pipeline </span></h2> <p>Die <code>pipeline()</code> unterstützt auch Audioaufgaben wie Audioklassifizierung und automatische Spracherkennung.</p> <p>Lassen Sie uns zum Beispiel die Emotion in diesem Audioclip klassifizieren:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span>torch.manual_seed(<span class="hljs-number">42</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>ds = load_dataset(<span class="hljs-string">&quot;hf-internal-testing/librispeech_asr_demo&quot;</span>, <span class="hljs-string">&quot;clean&quot;</span>, split=<span class="hljs-string">&quot;validation&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>audio_file = ds[<span class="hljs-number">0</span>][<span class="hljs-string">&quot;audio&quot;</span>][<span class="hljs-string">&quot;path&quot;</span>]<!-- HTML_TAG_END --></pre></div> <p>Finden Sie ein <a href="https://huggingface.co/models?pipeline_tag=audio-classification" rel="nofollow">Audioklassifikation</a> Modell auf dem Model Hub für Emotionserkennung und laden Sie es in die <code>pipeline()</code>:</p> <div class="code-block relative"><div 
class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>audio_classifier = pipeline( <span class="hljs-meta">... </span> task=<span class="hljs-string">&quot;audio-classification&quot;</span>, model=<span class="hljs-string">&quot;ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition&quot;</span> <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Übergeben Sie die Audiodatei an die <code>pipeline()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>preds = audio_classifier(audio_file) <span class="hljs-meta">&gt;&gt;&gt; </span>preds = [{<span class="hljs-string">&quot;score&quot;</span>: <span class="hljs-built_in">round</span>(pred[<span class="hljs-string">&quot;score&quot;</span>], <span class="hljs-number">4</span>), <span class="hljs-string">&quot;label&quot;</span>: pred[<span class="hljs-string">&quot;label&quot;</span>]} <span class="hljs-keyword">for</span> pred <span class="hljs-keyword">in</span> preds] <span class="hljs-meta">&gt;&gt;&gt; </span>preds [{<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.1315</span>, <span 
class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;calm&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.1307</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;neutral&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.1274</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;sad&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.1261</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;fearful&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.1242</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;happy&#x27;</span>}]<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="bildverarbeitungspipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#bildverarbeitungspipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Bildverarbeitungs-Pipeline 
</span></h2> <p>Die Verwendung einer <code>pipeline()</code> für Bildverarbeitungsaufgaben ist praktisch identisch.</p> <p>Geben Sie Ihre Aufgabe an und übergeben Sie Ihr Bild an den Klassifikator. Das Bild kann ein Link oder ein lokaler Pfad zu dem Bild sein. Zum Beispiel: Welche Katzenart ist unten abgebildet?</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" alt="pipeline-cat-chonk"></p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>vision_classifier = pipeline(task=<span class="hljs-string">&quot;image-classification&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; 
</span>preds = vision_classifier( <span class="hljs-meta">... </span> images=<span class="hljs-string">&quot;https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg&quot;</span> <span class="hljs-meta">... </span>) <span class="hljs-meta">&gt;&gt;&gt; </span>preds = [{<span class="hljs-string">&quot;score&quot;</span>: <span class="hljs-built_in">round</span>(pred[<span class="hljs-string">&quot;score&quot;</span>], <span class="hljs-number">4</span>), <span class="hljs-string">&quot;label&quot;</span>: pred[<span class="hljs-string">&quot;label&quot;</span>]} <span class="hljs-keyword">for</span> pred <span class="hljs-keyword">in</span> preds] <span class="hljs-meta">&gt;&gt;&gt; </span>preds [{<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.4335</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;lynx, catamount&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.0348</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;cougar, puma, catamount, mountain lion, painter, panther, Felis concolor&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.0324</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;snow leopard, ounce, Panthera uncia&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.0239</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;Egyptian cat&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.0229</span>, <span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;tiger cat&#x27;</span>}]<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="multimodale-pipeline" class="header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#multimodale-pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Multimodale Pipeline </span></h2> <p>Die <code>pipeline()</code> unterstützt mehr als eine Modalität. Eine Aufgabe zur Beantwortung visueller Fragen (VQA) kombiniert zum Beispiel Text und Bild. Verwenden Sie einen beliebigen Bildlink und eine Frage, die Sie zu dem Bild stellen möchten. 
Das Bild kann eine URL oder ein lokaler Pfad zu dem Bild sein.</p> <p>Wenn Sie zum Beispiel das gleiche Bild wie in der obigen Vision-Pipeline verwenden:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>image = <span class="hljs-string">&quot;https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>question = <span class="hljs-string">&quot;Where is the cat?&quot;</span><!-- HTML_TAG_END --></pre></div> <p>Erstellen Sie eine Pipeline für “vqa” und übergeben Sie ihr das Bild und die Frage:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition 
duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>vqa = pipeline(task=<span class="hljs-string">&quot;vqa&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>preds = vqa(image=image, question=question) <span class="hljs-meta">&gt;&gt;&gt; </span>preds = [{<span class="hljs-string">&quot;score&quot;</span>: <span class="hljs-built_in">round</span>(pred[<span class="hljs-string">&quot;score&quot;</span>], <span class="hljs-number">4</span>), <span class="hljs-string">&quot;answer&quot;</span>: pred[<span class="hljs-string">&quot;answer&quot;</span>]} <span class="hljs-keyword">for</span> pred <span class="hljs-keyword">in</span> preds] <span class="hljs-meta">&gt;&gt;&gt; </span>preds [{<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9112</span>, <span class="hljs-string">&#x27;answer&#x27;</span>: <span 
class="hljs-string">&#x27;snow&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.8796</span>, <span class="hljs-string">&#x27;answer&#x27;</span>: <span class="hljs-string">&#x27;in snow&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.6717</span>, <span class="hljs-string">&#x27;answer&#x27;</span>: <span class="hljs-string">&#x27;outside&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.0291</span>, <span class="hljs-string">&#x27;answer&#x27;</span>: <span class="hljs-string">&#x27;on ground&#x27;</span>}, {<span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.027</span>, <span class="hljs-string">&#x27;answer&#x27;</span>: <span class="hljs-string">&#x27;ground&#x27;</span>}]<!-- HTML_TAG_END --></pre></div> <script type="module" data-hydrate="1jalw0t"> import { start } from "/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1jalw0t"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/de","assets":"/docs/transformers/pr_18789/de"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/de/_app/pages/pipeline_tutorial.mdx-hf-doc-builder.js") ], params: {} } }); </script>
495
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/de/quicktour.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;schnellstart&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;pipeline&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;verwendung-der-pipeline&quot;,&quot;title&quot;:&quot;Verwendung der Pipeline&quot;},{&quot;local&quot;:&quot;ein-anderes-modell-und-einen-anderen-tokenizer-in-der-pipeline-verwenden&quot;,&quot;title&quot;:&quot;Ein anderes Modell und einen anderen Tokenizer in der Pipeline verwenden&quot;}],&quot;title&quot;:&quot;Pipeline&quot;},{&quot;local&quot;:&quot;autoclass&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;autotokenizer&quot;,&quot;title&quot;:&quot;AutoTokenizer&quot;},{&quot;local&quot;:&quot;automodel&quot;,&quot;title&quot;:&quot;AutoModel&quot;},{&quot;local&quot;:&quot;modell-speichern&quot;,&quot;title&quot;:&quot;Modell speichern&quot;}],&quot;title&quot;:&quot;AutoClass&quot;},{&quot;local&quot;:&quot;custom-model-builds&quot;,&quot;title&quot;:&quot;Custom model builds&quot;},{&quot;local&quot;:&quot;wie-geht-es-weiter&quot;,&quot;title&quot;:&quot;Wie geht es weiter?&quot;}],&quot;title&quot;:&quot;Schnellstart&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/quicktour.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/Tip-hf-doc-builder.js"> <link 
rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/Youtube-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/DocNotebookDropdown-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="schnellstart" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#schnellstart"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Schnellstart </span></h1> <div class="flex space-x-1 absolute z-10 right-0 top-0"> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Colab" class="!m-0" src="https://colab.research.google.com/assets/colab-badge.svg"> </button> </div> <div class="relative colab-dropdown "> <button class=" " type="button"> <img alt="Open In Studio Lab" class="!m-0" src="https://studiolab.sagemaker.aws/studiolab.svg"> </button> 
</div></div> <p>Mit 🤗 Transformers können Sie sofort loslegen! Verwenden Sie die <code>pipeline()</code> für schnelle Inferenz und laden Sie schnell ein vortrainiertes Modell und einen Tokenizer mit einer <a href="./model_doc/auto">AutoClass</a>, um Ihre Text-, Bild- oder Audioaufgabe zu lösen.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Alle in der Dokumentation vorgestellten Codebeispiele haben oben links einen Umschalter für PyTorch und TensorFlow. Wenn nicht, wird erwartet, dass der Code für beide Backends ohne Änderungen funktioniert.</p></div> <h2 class="relative group"><a id="pipeline" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pipeline </span></h2> <p><code>pipeline()</code> ist der einfachste Weg, ein vortrainiertes Modell für eine bestimmte Aufgabe zu verwenden.</p> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/tiZFewofSLM" 
title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Die <code>pipeline()</code> unterstützt viele gängige Aufgaben:</p> <p><strong>Text</strong>:</p> <ul><li>Stimmungsanalyse: Klassifizierung der Polarität eines gegebenen Textes.</li> <li>Textgenerierung (auf Englisch): Generierung von Text aus einer gegebenen Eingabe.</li> <li>Name-Entity-Recognition (NER): Kennzeichnung jedes Worts mit der Entität, die es repräsentiert (Person, Datum, Ort usw.).</li> <li>Beantwortung von Fragen: Extrahieren der Antwort aus dem Kontext, wenn ein gewisser Kontext und eine Frage gegeben sind.</li> <li>Fill-mask: Ausfüllen von Lücken in einem Text mit maskierten Wörtern.</li> <li>Zusammenfassung: Erstellung einer Zusammenfassung einer langen Text- oder Dokumentensequenz.</li> <li>Übersetzung: Übersetzen eines Textes in eine andere Sprache.</li> <li>Merkmalsextraktion: Erstellen einer Tensordarstellung des Textes.</li></ul> <p><strong>Bild</strong>:</p> <ul><li>Bildklassifizierung: Klassifizierung eines Bildes.</li> <li>Bildsegmentierung: Klassifizierung jedes Pixels in einem Bild.</li> <li>Objekterkennung: Erkennen von Objekten innerhalb eines Bildes.</li></ul> <p><strong>Audio</strong>:</p> <ul><li>Audioklassifizierung: Zuweisung eines Labels zu einem bestimmten Audiosegment.</li> <li>Automatische Spracherkennung (ASR): Transkription von Audiodaten in Text.</li></ul> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Für mehr Details über die <code>pipeline()</code> und assoziierte Aufgaben, schauen Sie in die Dokumentation <a href="./main_classes/pipelines">hier</a>.</p></div> <h3 class="relative group"><a id="verwendung-der-pipeline" class="header-link block pr-1.5 text-lg 
no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#verwendung-der-pipeline"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Verwendung der Pipeline </span></h3> <p>Im folgenden Beispiel werden Sie die <code>pipeline()</code> für die Stimmungsanalyse verwenden.</p> <p>Installieren Sie die folgenden Abhängigkeiten, falls Sie dies nicht bereits getan haben:</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" 
fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install torch<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 
text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path 
d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install tensorflow<!-- HTML_TAG_END --></pre></div> </div></div> </div> <p>Importieren sie die <code>pipeline()</code> und spezifizieren sie die Aufgabe, welche sie lösen möchten:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; 
</span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>classifier = pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Die Pipeline lädt ein standardmäßiges [vortrainiertes Modell] (<a href="https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english" rel="nofollow">https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english</a>) und einen Tokenizer für die Stimmungs-Analyse herunter und speichert sie. Jetzt können Sie den “Klassifikator” auf Ihren Zieltext anwenden:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>classifier(<span class="hljs-string">&quot;We are very happy to show you the 🤗 Transformers 
library.&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;POSITIVE&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.9998</span>}]<!-- HTML_TAG_END --></pre></div> <p>For more than one sentence, pass a list of sentences to the <code>pipeline()</code> which returns a list of dictionaries:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>results = classifier([<span class="hljs-string">&quot;We are very happy to show you the 🤗 Transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> result <span class="hljs-keyword">in</span> results: <span class="hljs-meta">... 
</span> <span class="hljs-built_in">print</span>(<span class="hljs-string">f&quot;label: <span class="hljs-subst">{result[<span class="hljs-string">&#x27;label&#x27;</span>]}</span>, with score: <span class="hljs-subst">{<span class="hljs-built_in">round</span>(result[<span class="hljs-string">&#x27;score&#x27;</span>], <span class="hljs-number">4</span>)}</span>&quot;</span>) label: POSITIVE, <span class="hljs-keyword">with</span> score: <span class="hljs-number">0.9998</span> label: NEGATIVE, <span class="hljs-keyword">with</span> score: <span class="hljs-number">0.5309</span><!-- HTML_TAG_END --></pre></div> <p>Die <code>pipeline()</code> kann auch über einen ganzen Datensatz iterieren. Starten wir mit der Installation der <a href="https://huggingface.co/docs/datasets/" rel="nofollow">🤗 Datasets</a> Bibliothek:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> 
<pre><!-- HTML_TAG_START -->pip install datasets <!-- HTML_TAG_END --></pre></div> <p>Erstellen wir eine <code>pipeline()</code> mit der Aufgabe die wir lösen und dem Modell welches wir nutzen möchten.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> pipeline <span class="hljs-meta">&gt;&gt;&gt; </span>speech_recognizer = pipeline(<span class="hljs-string">&quot;automatic-speech-recognition&quot;</span>, model=<span class="hljs-string">&quot;facebook/wav2vec2-base-960h&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Als nächstes laden wir den Datensatz (siehe 🤗 Datasets <a 
href="https://huggingface.co/docs/datasets/quickstart.html" rel="nofollow">Quick Start</a> für mehr Details) welches wir nutzen möchten. Zum Beispiel laden wir den <a href="https://huggingface.co/datasets/PolyAI/minds14" rel="nofollow">MInDS-14</a> Datensatz:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset, Audio <span class="hljs-meta">&gt;&gt;&gt; </span>dataset = load_dataset(<span class="hljs-string">&quot;PolyAI/minds14&quot;</span>, name=<span class="hljs-string">&quot;en-US&quot;</span>, split=<span class="hljs-string">&quot;train&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Wir müssen sicherstellen, dass die Abtastrate des Datensatzes der Abtastrate entspricht, mit der 
<code>facebook/wav2vec2-base-960h</code> trainiert wurde.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>dataset = dataset.cast_column(<span class="hljs-string">&quot;audio&quot;</span>, Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))<!-- HTML_TAG_END --></pre></div> <p>Audiodateien werden automatisch geladen und neu abgetastet, wenn die Spalte “audio” aufgerufen wird. 
Extrahieren wir die rohen Wellenform-Arrays der ersten 4 Beispiele und übergeben wir sie als Liste an die Pipeline:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>result = speech_recognizer(dataset[:<span class="hljs-number">4</span>][<span class="hljs-string">&quot;audio&quot;</span>]) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>([d[<span class="hljs-string">&quot;text&quot;</span>] <span class="hljs-keyword">for</span> d <span class="hljs-keyword">in</span> result]) [<span class="hljs-string">&#x27;I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT&#x27;</span>, <span class="hljs-string">&quot;FODING HOW I&#x27;D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE&quot;</span>, <span 
class="hljs-string">&quot;I I&#x27;D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I&#x27;M NOT SEEING THE OPTION TO DO IT ON THE AP SO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I&#x27;M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS&quot;</span>, <span class="hljs-string">&#x27;HOW DO I THURN A JOIN A COUNT&#x27;</span>]<!-- HTML_TAG_END --></pre></div> <p>Bei einem größeren Datensatz mit vielen Eingaben (wie bei Sprache oder Bildverarbeitung) sollten Sie einen Generator anstelle einer Liste übergeben, der alle Eingaben in den Speicher lädt. Weitere Informationen finden Sie in der <a href="./main_classes/pipelines">Pipeline-Dokumentation</a>.</p> <h3 class="relative group"><a id="ein-anderes-modell-und-einen-anderen-tokenizer-in-der-pipeline-verwenden" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#ein-anderes-modell-und-einen-anderen-tokenizer-in-der-pipeline-verwenden"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Ein anderes Modell und einen anderen Tokenizer in der Pipeline verwenden 
</span></h3> <p>Die <code>pipeline()</code> kann jedes Modell aus dem [Model Hub] (<a href="https://huggingface.co/models" rel="nofollow">https://huggingface.co/models</a>) verwenden, wodurch es einfach ist, die <code>pipeline()</code> für andere Anwendungsfälle anzupassen. Wenn Sie beispielsweise ein Modell wünschen, das französischen Text verarbeiten kann, verwenden Sie die Tags im Model Hub, um nach einem geeigneten Modell zu filtern. Das oberste gefilterte Ergebnis liefert ein mehrsprachiges <a href="https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment" rel="nofollow">BERT-Modell</a>, das auf die Stimmungsanalyse abgestimmt ist. Großartig, verwenden wir dieses Modell!</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span 
class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span><!-- HTML_TAG_END --></pre></div> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 
8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Use the <code>AutoModelForSequenceClassification</code> and <code>AutoTokenizer</code> to load the pretrained model and it’s associated tokenizer (more on an <code>AutoClass</code> below):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, AutoModelForSequenceClassification <span 
class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModelForSequenceClassification.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_name)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 
5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Use the <code>TFAutoModelForSequenceClassification</code> and <code>AutoTokenizer</code> to load the pretrained model and it’s associated tokenizer (more on an <code>TFAutoClass</code> below):</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 
opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer, TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model = TFAutoModelForSequenceClassification.from_pretrained(model_name) <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_name)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <p>Dann können Sie das Modell und den Tokenizer in der <code>pipeline()</code> angeben und den <code>Klassifikator</code> auf Ihren Zieltext anwenden:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: 
transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>classifier = pipeline(<span class="hljs-string">&quot;sentiment-analysis&quot;</span>, model=model, tokenizer=tokenizer) <span class="hljs-meta">&gt;&gt;&gt; </span>classifier(<span class="hljs-string">&quot;Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.&quot;</span>) [{<span class="hljs-string">&#x27;label&#x27;</span>: <span class="hljs-string">&#x27;5 stars&#x27;</span>, <span class="hljs-string">&#x27;score&#x27;</span>: <span class="hljs-number">0.7273</span>}]<!-- HTML_TAG_END --></pre></div> <p>Wenn Sie kein Modell für Ihren Anwendungsfall finden können, müssen Sie ein vortrainiertes Modell auf Ihren Daten feinabstimmen. Schauen Sie sich unser <a href="./training">Feinabstimmungs-Tutorial</a> an, um zu erfahren, wie das geht. Und schließlich, nachdem Sie Ihr trainiertes Modell verfeinert haben, sollten Sie es mit der Community im Model Hub teilen (siehe Tutorial <a href="./model_sharing">hier</a>), um NLP für alle zu demokratisieren! 
🤗</p> <h2 class="relative group"><a id="autoclass" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#autoclass"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutoClass </span></h2> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/AhChOFRegn4" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Unter der Haube arbeiten die Klassen <code>AutoModelForSequenceClassification</code> und <code>AutoTokenizer</code> zusammen, um die <code>pipeline()</code> zu betreiben. Eine <a href="./model_doc/auto"><code>AutoClass</code></a> ist eine Abkürzung, die automatisch die Architektur eines trainierten Modells aus dessen Namen oder Pfad abruft. Sie müssen nur die passende <code>AutoClass</code> für Ihre Aufgabe und den zugehörigen Tokenizer mit <code>AutoTokenizer</code> auswählen. 
</p> <p>Kehren wir zu unserem Beispiel zurück und sehen wir uns an, wie Sie die <code>AutoClass</code> verwenden können, um die Ergebnisse der <code>pipeline()</code> zu replizieren.</p> <h3 class="relative group"><a id="autotokenizer" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#autotokenizer"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>AutoTokenizer </span></h3> <p>Ein Tokenizer ist für die Vorverarbeitung von Text in ein für das Modell verständliches Format zuständig. Zunächst zerlegt der Tokenisierer den Text in Wörter, die <em>Token</em> genannt werden. Es gibt mehrere Regeln für den Tokenisierungsprozess, z. B. wie und auf welcher Ebene ein Wort aufgespalten wird (weitere Informationen über Tokenisierung <a href="./tokenizer_summary">hier</a>). Das Wichtigste ist jedoch, dass Sie den Tokenizer mit demselben Modellnamen instanziieren müssen, um sicherzustellen, dass Sie dieselben Tokenisierungsregeln verwenden, mit denen ein Modell zuvor trainiert wurde. 
Laden sie einen Tokenizer mit <code>AutoTokenizer</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoTokenizer <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(model_name)<!-- HTML_TAG_END --></pre></div> <p>Anschließend wandelt der Tokenizer die Token in Zahlen um, um einen Tensor als Eingabe für das Modell zu konstruieren. 
Dieser wird als <em>Vokabular</em> des Modells bezeichnet.</p> <p>Übergeben Sie Ihren Text an den Tokenizer:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>encoding = tokenizer(<span class="hljs-string">&quot;We are very happy to show you the 🤗 Transformers library.&quot;</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(encoding) {<span class="hljs-string">&#x27;input_ids&#x27;</span>: [<span class="hljs-number">101</span>, <span class="hljs-number">11312</span>, <span class="hljs-number">10320</span>, <span class="hljs-number">12495</span>, <span class="hljs-number">19308</span>, <span class="hljs-number">10114</span>, <span class="hljs-number">11391</span>, <span class="hljs-number">10855</span>, <span class="hljs-number">10103</span>, <span 
class="hljs-number">100</span>, <span class="hljs-number">58263</span>, <span class="hljs-number">13299</span>, <span class="hljs-number">119</span>, <span class="hljs-number">102</span>], <span class="hljs-string">&#x27;token_type_ids&#x27;</span>: [<span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>, <span class="hljs-number">0</span>], <span class="hljs-string">&#x27;attention_mask&#x27;</span>: [<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>]}<!-- HTML_TAG_END --></pre></div> <p>Der Tokenizer gibt ein Wörterbuch zurück, das Folgendes enthält:</p> <ul><li><a href="./glossary#input-ids">input_ids</a>: numerische Repräsentationen Ihrer Token.</li> <li><a href=".glossary#attention-mask">atttention_mask</a>: gibt an, welche Token beachtet werden sollen.</li></ul> <p>Genau wie die <code>pipeline()</code> akzeptiert der Tokenizer eine Liste von Eingaben. 
Darüber hinaus kann der Tokenizer den Text auch auffüllen und kürzen, um einen Stapel mit einheitlicher Länge zurückzugeben:</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 
0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_batch = tokenizer( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;We are very happy to show you the 🤗 Transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>], <span class="hljs-meta">... 
</span> padding=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> max_length=<span class="hljs-number">512</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;pt&quot;</span>, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 
6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform 
-translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_batch = tokenizer( <span class="hljs-meta">... </span> [<span class="hljs-string">&quot;We are very happy to show you the 🤗 Transformers library.&quot;</span>, <span class="hljs-string">&quot;We hope you don&#x27;t hate it.&quot;</span>], <span class="hljs-meta">... </span> padding=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> truncation=<span class="hljs-literal">True</span>, <span class="hljs-meta">... </span> max_length=<span class="hljs-number">512</span>, <span class="hljs-meta">... </span> return_tensors=<span class="hljs-string">&quot;tf&quot;</span>, <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <p>Lesen Sie das Tutorial <a href="./preprocessing">preprocessing</a> für weitere Details zur Tokenisierung.</p> <h3 class="relative group"><a id="automodel" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#automodel"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" 
fill="currentColor"></path></svg></span></a> <span>AutoModel </span></h3> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 
8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>🤗 Transformers bietet eine einfache und einheitliche Möglichkeit, vortrainierte Instanzen zu laden. Das bedeutet, dass Sie ein <code>AutoModel</code> laden können, wie Sie einen <code>AutoTokenizer</code> laden würden. Der einzige Unterschied ist die Auswahl des richtigen <code>AutoModel</code> für die Aufgabe. Da Sie eine Text- oder Sequenzklassifizierung vornehmen, laden Sie <code>AutoModelForSequenceClassification</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START 
--><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>In der <a href="./task_summary">Aufgabenzusammenfassung</a> steht, welche [AutoModel]-Klasse für welche Aufgabe zu verwenden ist.</p></div> <p>Jetzt können Sie Ihren vorverarbeiteten Stapel von Eingaben direkt an das Modell übergeben. Sie müssen nur das Wörterbuch entpacken, indem Sie <code>**</code> hinzufügen:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div 
class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_outputs = pt_model(**pt_batch)<!-- HTML_TAG_END --></pre></div> <p>Das Modell gibt die endgültigen Aktivierungen in dem Attribut “logits” aus. Wenden Sie die Softmax-Funktion auf die “logits” an, um die Wahrscheinlichkeiten zu erhalten:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> torch <span class="hljs-keyword">import</span> nn <span class="hljs-meta">&gt;&gt;&gt; </span>pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-<span 
class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-built_in">print</span>(pt_predictions) tensor([[<span class="hljs-number">0.0021</span>, <span class="hljs-number">0.0018</span>, <span class="hljs-number">0.0115</span>, <span class="hljs-number">0.2121</span>, <span class="hljs-number">0.7725</span>], [<span class="hljs-number">0.2084</span>, <span class="hljs-number">0.1826</span>, <span class="hljs-number">0.1969</span>, <span class="hljs-number">0.1755</span>, <span class="hljs-number">0.2365</span>]], grad_fn=&lt;SoftmaxBackward0&gt;)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" 
fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>🤗 Transformers bietet eine einfache und einheitliche Methode zum Laden von vortrainierten Instanzen. Das bedeutet, dass Sie ein <code>TFAutoModel</code> genauso laden können, wie Sie einen <code>AutoTokenizer</code> laden würden. Der einzige Unterschied ist die Auswahl des richtigen <code>TFAutoModel</code> für die Aufgabe. 
Da Sie Text - oder Sequenz - Klassifizierung machen, laden Sie <code>TFAutoModelForSequenceClassification</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModelForSequenceClassification <span class="hljs-meta">&gt;&gt;&gt; </span>model_name = <span class="hljs-string">&quot;nlptown/bert-base-multilingual-uncased-sentiment&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)<!-- HTML_TAG_END --></pre></div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 
text-green-700 dark:text-gray-400"><p>In der <a href="./task_summary">Aufgabenzusammenfassung</a> steht, welche [AutoModel]-Klasse für welche Aufgabe zu verwenden ist.</p></div> <p>Jetzt können Sie Ihren vorverarbeiteten Stapel von Eingaben direkt an das Modell übergeben, indem Sie die Wörterbuchschlüssel direkt an die Tensoren übergeben:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_outputs = tf_model(tf_batch)<!-- HTML_TAG_END --></pre></div> <p>Das Modell gibt die endgültigen Aktivierungen in dem Attribut “logits” aus. 
Wenden Sie die Softmax-Funktion auf die “logits” an, um die Wahrscheinlichkeiten zu erhalten:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf <span class="hljs-meta">&gt;&gt;&gt; </span>tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-<span class="hljs-number">1</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_predictions<!-- HTML_TAG_END --></pre></div> </div></div> </div> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Alle 🤗 Transformers-Modelle (PyTorch oder TensorFlow) geben die Tensoren 
<em>vor</em> der endgültigen Aktivierungsfunktion Funktion (wie Softmax) aus, da die endgültige Aktivierungsfunktion oft mit dem Verlusten verschmolzen ist.</p></div> <p>Modelle sind ein standardmäßiges <a href="https://pytorch.org/docs/stable/nn.html#torch.nn.Module" rel="nofollow"><code>torch.nn.Module</code></a> oder ein <a href="https://www.tensorflow.org/api_docs/python/tf/keras/Model" rel="nofollow"><code>tf.keras.Model</code></a>, sodass Sie sie in Ihrer üblichen Trainingsschleife verwenden können. Um jedoch die Dinge einfacher zu machen, bietet 🤗 Transformers eine <code>Trainer</code>-Klasse für PyTorch, die Funktionalität für verteiltes Training, gemischte Präzision und mehr bietet. Für TensorFlow können Sie die Methode <code>fit</code> aus <a href="https://keras.io/" rel="nofollow">Keras</a> verwenden. Siehe das <a href="./training">training tutorial</a> für weitere Details.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Transformers-Modellausgaben sind spezielle Datenklassen, so dass ihre Attribute in einer IDE automatisch vervollständigt werden. Die Modellausgänge verhalten sich auch wie ein Tupel oder ein Wörterbuch (z.B. 
können Sie mit einem Integer, einem Slice oder einem String indexieren), wobei die Attribute, die “None” sind, ignoriert werden.</p></div> <h3 class="relative group"><a id="modell-speichern" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#modell-speichern"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Modell speichern </span></h3> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" 
fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Sobald Ihr Modell feinabgestimmt ist, können Sie es mit seinem Tokenizer speichern, indem Sie <code>PreTrainedModel.save_pretrained()</code> verwenden:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" 
aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_save_directory = <span class="hljs-string">&quot;./pt_save_pretrained&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(pt_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.save_pretrained(pt_save_directory)<!-- HTML_TAG_END --></pre></div> <p>Wenn Sie bereit sind, das Modell erneut zu verwenden, laden Sie es mit <code>PreTrainedModel.from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;./pt_save_pretrained&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 
bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Sobald Ihr Modell feinabgestimmt ist, können Sie es mit seinem Tokenizer unter Verwendung von <code>TFPreTrainedModel.save_pretrained()</code> speichern:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_save_directory = <span class="hljs-string">&quot;./tf_save_pretrained&quot;</span> <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.save_pretrained(tf_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model.save_pretrained(tf_save_directory)<!-- HTML_TAG_END --></pre></div> <p>Wenn Sie bereit sind, das Modell wieder zu verwenden, laden Sie es mit <code>TFPreTrainedModel.from_pretrained()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black 
text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;./tf_save_pretrained&quot;</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <p>Ein besonders cooles 🤗 Transformers-Feature ist die Möglichkeit, ein Modell zu speichern und es entweder als PyTorch- oder TensorFlow-Modell wieder zu laden. Der Parameter “from_pt” oder “from_tf” kann das Modell von einem Framework in das andere konvertieren:</p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" 
xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none 
transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" 
fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path 
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) <span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <h2 class="relative group"><a id="custom-model-builds" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#custom-model-builds"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 
0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Custom model builds </span></h2> <p>Sie können die Konfigurationsklasse des Modells ändern, um zu bestimmen, wie ein Modell aufgebaut ist. Die Konfiguration legt die Attribute eines Modells fest, z. B. die Anzahl der verborgenen Schichten oder der Aufmerksamkeitsköpfe. Wenn Sie ein Modell aus einer benutzerdefinierten Konfigurationsklasse initialisieren, beginnen Sie bei Null. Die Modellattribute werden zufällig initialisiert, und Sie müssen das Modell trainieren, bevor Sie es verwenden können, um aussagekräftige Ergebnisse zu erhalten.</p> <p>Beginnen Sie mit dem Import von <code>AutoConfig</code> und laden Sie dann das trainierte Modell, das Sie ändern möchten. Innerhalb von <code>AutoConfig.from_pretrained()</code> können Sie das Attribut angeben, das Sie ändern möchten, z. B. 
die Anzahl der Aufmerksamkeitsköpfe:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoConfig <span class="hljs-meta">&gt;&gt;&gt; </span>my_config = AutoConfig.from_pretrained(<span class="hljs-string">&quot;distilbert-base-uncased&quot;</span>, n_heads=<span class="hljs-number">12</span>)<!-- HTML_TAG_END --></pre></div> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Create a model from your custom configuration with 
<code>AutoModel.from_config()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>my_model = AutoModel.from_config(my_config)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 
42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 
1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Create a model from your custom configuration with <code>TFAutoModel.from_config()</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> TFAutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>my_model = TFAutoModel.from_config(my_config)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <p>Weitere Informationen zur Erstellung von benutzerdefinierten Konfigurationen finden Sie in der Anleitung <a href="./create_a_model">Erstellen einer benutzerdefinierten Architektur</a>.</p> <h2 class="relative group"><a id="wie-geht-es-weiter" class="header-link block pr-1.5 
text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#wie-geht-es-weiter"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Wie geht es weiter? </span></h2> <p>Nachdem Sie nun die 🤗 Transformers-Kurztour abgeschlossen haben, schauen Sie sich unsere Anleitungen an und erfahren Sie, wie Sie spezifischere Dinge tun können, wie das Schreiben eines benutzerdefinierten Modells, die Feinabstimmung eines Modells für eine Aufgabe und wie man ein Modell mit einem Skript trainiert. 
Wenn Sie mehr über die Kernkonzepte von 🤗 Transformers erfahren möchten, nehmen Sie sich eine Tasse Kaffee und werfen Sie einen Blick auf unsere konzeptionellen Leitfäden!</p> <script type="module" data-hydrate="1ixwwrw"> import { start } from "/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1ixwwrw"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/de","assets":"/docs/transformers/pr_18789/de"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/de/_app/pages/quicktour.mdx-hf-doc-builder.js") ], params: {} } }); </script>
496
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/de/index.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;transformers&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;wenn-sie-auf-der-suche-nach-individueller-untersttzung-durch-das-hugging-faceteam-sind&quot;,&quot;title&quot;:&quot;Wenn Sie auf der Suche nach individueller Unterstützung durch das Hugging Face-Team sind&quot;},{&quot;local&quot;:&quot;inhalt&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;untersttze-modelle&quot;,&quot;title&quot;:&quot;Unterstütze Modelle&quot;},{&quot;local&quot;:&quot;untersttzte-frameworks&quot;,&quot;title&quot;:&quot;Unterstützte Frameworks&quot;}],&quot;title&quot;:&quot;Inhalt&quot;}],&quot;title&quot;:&quot;🤗 Transformers&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/index.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/IconCopyLink-hf-doc-builder.js"> <h1 class="relative group"><a id="transformers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#transformers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 
256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>🤗 Transformers </span></h1> <p>Maschinelles Lernen auf dem neuesten Stand der Technik für PyTorch, TensorFlow und JAX.</p> <p>🤗 Transformers bietet APIs zum einfachen Herunterladen und Trainieren von vortrainierten Modellen auf dem neuesten Stand der Technik. Die Verwendung von vortrainierten Modellen kann Rechenkosten sparen und den CO2-Fußabdruck reduzieren und Zeit sparen, die für das Training eines Modells von Grund auf benötigt wird. Die Modelle können für verschiedene Modalitäten verwendet werden, wie z. B.:</p> <ul><li>📝 Text: Textklassifizierung, Informationsextrahierung, Beantwortung von Fragen, Zusammenfassung, Übersetzung und Texterstellung in über 100 Sprachen.</li> <li>🖼️ Bilder: Bildklassifizierung, Objekterkennung und Segmentierung.</li> <li>🗣️ Audio: Spracherkennung und Audioklassifizierung.</li> <li>🐙 Multimodal: Beantwortung von Tabellenfragen, optische Zeichenerkennung, Informationsextraktion aus gescannten Dokumenten, Videoklassifizierung und Beantwortung visueller Fragen.</li></ul> <p>Unsere Bibliothek unterstützt die nahtlose Integration von drei der beliebtesten Deep-Learning-Bibliotheken: <a href="https://pytorch.org/" rel="nofollow">PyTorch</a>, <a href="https://www.tensorflow.org/" rel="nofollow">TensorFlow</a> und <a href="https://jax.readthedocs.io/en/latest/" rel="nofollow">JAX</a>. 
Trainieren Sie Ihr Modell in drei Codezeilen in einem Framework und laden Sie es zur Inferenz mit einem anderen.</p> <p>Jede 🤗 Transformers-Architektur ist in einem eigenständigen Python-Modul definiert, so dass sie leicht für Forschung und Experimente angepasst werden kann.</p> <h2 class="relative group"><a id="wenn-sie-auf-der-suche-nach-individueller-untersttzung-durch-das-hugging-faceteam-sind" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#wenn-sie-auf-der-suche-nach-individueller-untersttzung-durch-das-hugging-faceteam-sind"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Wenn Sie auf der Suche nach individueller Unterstützung durch das Hugging Face-Team sind </span></h2> <a target="_blank" href="https://huggingface.co/support"><img alt="HuggingFace Expert Acceleration Program" src="https://cdn-media.huggingface.co/marketing/transformers/new-support-improved.png" style="max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);"> </a><br> <h2 class="relative group"><a id="inhalt" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 
with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#inhalt"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Inhalt </span></h2> <p>Die Dokumentation ist in fünf Teile gegliedert:</p> <ul><li><p><strong>GET STARTED</strong> enthält eine kurze Tour und Installationsanweisungen, um mit 🤗 Transformers loszulegen.</p></li> <li><p><strong>TUTORIALS</strong> sind ein hervorragender Ausgangspunkt, wenn Sie neu in unserer Bibliothek sind. Dieser Abschnitt hilft Ihnen, die grundlegenden Fähigkeiten zu erlangen, die Sie benötigen, um mit 🤗 Transformers zu arbeiten.</p></li> <li><p><strong>HOW-TO GUIDES</strong> zeigen Ihnen, wie Sie ein bestimmtes Ziel erreichen können, z. B. die Feinabstimmung eines vortrainierten Modells für die Sprachmodellierung oder die Erstellung eines benutzerdefinierten Modellkopfs.</p></li> <li><p><strong>KONZEPTUELLE ANLEITUNGEN</strong> bietet weitere Diskussionen und Erklärungen zu den zugrunde liegenden Konzepten und Ideen hinter Modellen, Aufgaben und der Designphilosophie von 🤗 Transformers. 
</p></li> <li><p><strong>API</strong> beschreibt jede Klasse und Funktion, gruppiert in:</p> <ul><li><strong>MAIN CLASSES</strong> für die Hauptklassen, die die wichtigsten APIs der Bibliothek darstellen.</li> <li>MODELLE** für die Klassen und Funktionen, die zu jedem in der Bibliothek implementierten Modell gehören.</li> <li><strong>INTERNAL HELPERS</strong> für die Klassen und Funktionen, die wir intern verwenden.</li></ul></li></ul> <p>Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen, vortrainierte Modellgewichte, Nutzungsskripte und Konvertierungsprogramme für die folgenden Modelle.</p> <h3 class="relative group"><a id="untersttze-modelle" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#untersttze-modelle"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Unterstütze Modelle </span></h3> <ol><li><strong><a href="model_doc/albert">ALBERT</a></strong> (from Google Research and the Toyota Technological Institute at Chicago) released with the paper <a href="https://arxiv.org/abs/1909.11942" rel="nofollow">ALBERT: A Lite BERT for Self-supervised Learning of Language Representations</a>, by 
Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.</li> <li><strong><a href="model_doc/bart">BART</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/1910.13461" rel="nofollow">BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension</a> by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.</li> <li><strong><a href="model_doc/barthez">BARThez</a></strong> (from École polytechnique) released with the paper <a href="https://arxiv.org/abs/2010.12321" rel="nofollow">BARThez: a Skilled Pretrained French Sequence-to-Sequence Model</a> by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.</li> <li><strong><a href="model_doc/bartpho">BARTpho</a></strong> (from VinAI Research) released with the paper <a href="https://arxiv.org/abs/2109.09701" rel="nofollow">BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese</a> by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.</li> <li><strong><a href="model_doc/beit">BEiT</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2106.08254" rel="nofollow">BEiT: BERT Pre-Training of Image Transformers</a> by Hangbo Bao, Li Dong, Furu Wei.</li> <li><strong><a href="model_doc/bert">BERT</a></strong> (from Google) released with the paper <a href="https://arxiv.org/abs/1810.04805" rel="nofollow">BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding</a> by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.</li> <li><strong><a href="model_doc/bert-generation">BERT For Sequence Generation</a></strong> (from Google) released with the paper <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.</li> <li><strong><a 
href="model_doc/bertweet">BERTweet</a></strong> (from VinAI Research) released with the paper <a href="https://aclanthology.org/2020.emnlp-demos.2/" rel="nofollow">BERTweet: A pre-trained language model for English Tweets</a> by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.</li> <li><strong><a href="model_doc/bigbird_pegasus">BigBird-Pegasus</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2007.14062" rel="nofollow">Big Bird: Transformers for Longer Sequences</a> by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.</li> <li><strong><a href="model_doc/big_bird">BigBird-RoBERTa</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2007.14062" rel="nofollow">Big Bird: Transformers for Longer Sequences</a> by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.</li> <li><strong><a href="model_doc/blenderbot">Blenderbot</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2004.13637" rel="nofollow">Recipes for building an open-domain chatbot</a> by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.</li> <li><strong><a href="model_doc/blenderbot-small">BlenderbotSmall</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2004.13637" rel="nofollow">Recipes for building an open-domain chatbot</a> by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. 
Smith, Y-Lan Boureau, Jason Weston.</li> <li><strong><a href="model_doc/bloom">BLOOM</a></strong> (from BigScience workshop) released by the <a href="https://bigscience.huggingface.co/" rel="nofollow">BigSicence Workshop</a>.</li> <li><strong><a href="model_doc/bort">BORT</a></strong> (from Alexa) released with the paper <a href="https://arxiv.org/abs/2010.10499" rel="nofollow">Optimal Subarchitecture Extraction For BERT</a> by Adrian de Wynter and Daniel J. Perry.</li> <li><strong><a href="model_doc/byt5">ByT5</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2105.13626" rel="nofollow">ByT5: Towards a token-free future with pre-trained byte-to-byte models</a> by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.</li> <li><strong><a href="model_doc/camembert">CamemBERT</a></strong> (from Inria/Facebook/Sorbonne) released with the paper <a href="https://arxiv.org/abs/1911.03894" rel="nofollow">CamemBERT: a Tasty French Language Model</a> by Louis Martin<em>, Benjamin Muller</em>, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.</li> <li><strong><a href="model_doc/canine">CANINE</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2103.06874" rel="nofollow">CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation</a> by Jonathan H. 
Clark, Dan Garrette, Iulia Turc, John Wieting.</li> <li><strong><a href="model_doc/clip">CLIP</a></strong> (from OpenAI) released with the paper <a href="https://arxiv.org/abs/2103.00020" rel="nofollow">Learning Transferable Visual Models From Natural Language Supervision</a> by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.</li> <li><strong><a href="model_doc/codegen">CodeGen</a></strong> (from Salesforce) released with the paper <a href="https://arxiv.org/abs/2203.13474" rel="nofollow">A Conversational Paradigm for Program Synthesis</a> by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.</li> <li><strong><a href="model_doc/convbert">ConvBERT</a></strong> (from YituTech) released with the paper <a href="https://arxiv.org/abs/2008.02496" rel="nofollow">ConvBERT: Improving BERT with Span-based Dynamic Convolution</a> by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.</li> <li><strong><a href="model_doc/convnext">ConvNeXT</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2201.03545" rel="nofollow">A ConvNet for the 2020s</a> by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.</li> <li><strong><a href="model_doc/cpm">CPM</a></strong> (from Tsinghua University) released with the paper <a href="https://arxiv.org/abs/2012.00413" rel="nofollow">CPM: A Large-scale Generative Chinese Pre-trained Language Model</a> by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.</li> <li><strong><a href="model_doc/ctrl">CTRL</a></strong> (from 
Salesforce) released with the paper <a href="https://arxiv.org/abs/1909.05858" rel="nofollow">CTRL: A Conditional Transformer Language Model for Controllable Generation</a> by Nitish Shirish Keskar<em>, Bryan McCann</em>, Lav R. Varshney, Caiming Xiong and Richard Socher.</li> <li><strong><a href="model_doc/cvt">CvT</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2103.15808" rel="nofollow">CvT: Introducing Convolutions to Vision Transformers</a> by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang.</li> <li><strong><a href="model_doc/data2vec">Data2Vec</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2202.03555" rel="nofollow">Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language</a> by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.</li> <li><strong><a href="model_doc/deberta">DeBERTa</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.</li> <li><strong><a href="model_doc/deberta-v2">DeBERTa-v2</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2006.03654" rel="nofollow">DeBERTa: Decoding-enhanced BERT with Disentangled Attention</a> by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.</li> <li><strong><a href="model_doc/decision_transformer">Decision Transformer</a></strong> (from Berkeley/Facebook/Google) released with the paper <a href="https://arxiv.org/abs/2106.01345" rel="nofollow">Decision Transformer: Reinforcement Learning via Sequence Modeling</a> by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.</li> <li><strong><a href="model_doc/deit">DeiT</a></strong> (from Facebook) released with the 
paper <a href="https://arxiv.org/abs/2012.12877" rel="nofollow">Training data-efficient image transformers &amp; distillation through attention</a> by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.</li> <li><strong><a href="model_doc/detr">DETR</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2005.12872" rel="nofollow">End-to-End Object Detection with Transformers</a> by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.</li> <li><strong><a href="model_doc/dialogpt">DialoGPT</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/1911.00536" rel="nofollow">DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation</a> by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.</li> <li><strong><a href="model_doc/distilbert">DistilBERT</a></strong> (from HuggingFace), released together with the paper <a href="https://arxiv.org/abs/1910.01108" rel="nofollow">DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter</a> by Victor Sanh, Lysandre Debut and Thomas Wolf. 
The same method has been applied to compress GPT2 into <a href="https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation" rel="nofollow">DistilGPT2</a>, RoBERTa into <a href="https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation" rel="nofollow">DistilRoBERTa</a>, Multilingual BERT into <a href="https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation" rel="nofollow">DistilmBERT</a> and a German version of DistilBERT.</li> <li><strong><a href="model_doc/dit">DiT</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2203.02378" rel="nofollow">DiT: Self-supervised Pre-training for Document Image Transformer</a> by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.</li> <li><strong><a href="model_doc/dpr">DPR</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2004.04906" rel="nofollow">Dense Passage Retrieval for Open-Domain Question Answering</a> by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.</li> <li><strong><a href="master/model_doc/dpt">DPT</a></strong> (from Intel Labs) released with the paper <a href="https://arxiv.org/abs/2103.13413" rel="nofollow">Vision Transformers for Dense Prediction</a> by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.</li> <li><strong><a href="model_doc/electra">ELECTRA</a></strong> (from Google Research/Stanford University) released with the paper <a href="https://arxiv.org/abs/2003.10555" rel="nofollow">ELECTRA: Pre-training text encoders as discriminators rather than generators</a> by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. 
Manning.</li> <li><strong><a href="model_doc/encoder-decoder">EncoderDecoder</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/1907.12461" rel="nofollow">Leveraging Pre-trained Checkpoints for Sequence Generation Tasks</a> by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.</li> <li><strong><a href="model_doc/flaubert">FlauBERT</a></strong> (from CNRS) released with the paper <a href="https://arxiv.org/abs/1912.05372" rel="nofollow">FlauBERT: Unsupervised Language Model Pre-training for French</a> by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.</li> <li><strong><a href="model_doc/flava">FLAVA</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2112.04482" rel="nofollow">FLAVA: A Foundational Language And Vision Alignment Model</a> by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.</li> <li><strong><a href="model_doc/fnet">FNet</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2105.03824" rel="nofollow">FNet: Mixing Tokens with Fourier Transforms</a> by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.</li> <li><strong><a href="model_doc/funnel">Funnel Transformer</a></strong> (from CMU/Google Brain) released with the paper <a href="https://arxiv.org/abs/2006.03236" rel="nofollow">Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing</a> by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. 
Le.</li> <li><strong><a href="model_doc/glpn">GLPN</a></strong> (from KAIST) released with the paper <a href="https://arxiv.org/abs/2201.07436" rel="nofollow">Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth</a> by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.</li> <li><strong><a href="model_doc/openai-gpt">GPT</a></strong> (from OpenAI) released with the paper <a href="https://blog.openai.com/language-unsupervised/" rel="nofollow">Improving Language Understanding by Generative Pre-Training</a> by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.</li> <li><strong><a href="model_doc/gpt_neo">GPT Neo</a></strong> (from EleutherAI) released in the repository <a href="https://github.com/EleutherAI/gpt-neo" rel="nofollow">EleutherAI/gpt-neo</a> by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.</li> <li><strong><a href="model_doc/gpt_neox">GPT NeoX</a></strong> (from EleutherAI) released with the paper <a href="https://arxiv.org/abs/2204.06745" rel="nofollow">GPT-NeoX-20B: An Open-Source Autoregressive Language Model</a> by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach</li> <li><strong><a href="model_doc/gpt2">GPT-2</a></strong> (from OpenAI) released with the paper <a href="https://blog.openai.com/better-language-models/" rel="nofollow">Language Models are Unsupervised Multitask Learners</a> by Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei and Ilya Sutskever.</li> <li><strong><a href="model_doc/gptj">GPT-J</a></strong> (from EleutherAI) released in the repository <a href="https://github.com/kingoflolz/mesh-transformer-jax/" rel="nofollow">kingoflolz/mesh-transformer-jax</a> by Ben Wang and Aran Komatsuzaki.</li> <li><strong><a 
href="model_doc/groupvit">GroupViT</a></strong> (from UCSD, NVIDIA) released with the paper <a href="https://arxiv.org/abs/2202.11094" rel="nofollow">GroupViT: Semantic Segmentation Emerges from Text Supervision</a> by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.</li> <li><strong><a href="model_doc/hubert">Hubert</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2106.07447" rel="nofollow">HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units</a> by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.</li> <li><strong><a href="model_doc/ibert">I-BERT</a></strong> (from Berkeley) released with the paper <a href="https://arxiv.org/abs/2101.01321" rel="nofollow">I-BERT: Integer-only BERT Quantization</a> by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.</li> <li><strong><a href="model_doc/imagegpt">ImageGPT</a></strong> (from OpenAI) released with the paper <a href="https://openai.com/blog/image-gpt/" rel="nofollow">Generative Pretraining from Pixels</a> by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.</li> <li><strong><a href="model_doc/layoutlm">LayoutLM</a></strong> (from Microsoft Research Asia) released with the paper <a href="https://arxiv.org/abs/1912.13318" rel="nofollow">LayoutLM: Pre-training of Text and Layout for Document Image Understanding</a> by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.</li> <li><strong><a href="model_doc/layoutlmv2">LayoutLMv2</a></strong> (from Microsoft Research Asia) released with the paper <a href="https://arxiv.org/abs/2012.14740" rel="nofollow">LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding</a> by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, 
Lidong Zhou.</li> <li><strong><a href="model_doc/layoutlmv3">LayoutLMv3</a></strong> (from Microsoft Research Asia) released with the paper <a href="https://arxiv.org/abs/2204.08387" rel="nofollow">LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking</a> by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei.</li> <li><strong><a href="model_doc/layoutxlm">LayoutXLM</a></strong> (from Microsoft Research Asia) released with the paper <a href="https://arxiv.org/abs/2104.08836" rel="nofollow">LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding</a> by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.</li> <li><strong><a href="model_doc/led">LED</a></strong> (from AllenAI) released with the paper <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer: The Long-Document Transformer</a> by Iz Beltagy, Matthew E. Peters, Arman Cohan.</li> <li><strong><a href="model_doc/levit">LeViT</a></strong> (from Meta AI) released with the paper <a href="https://arxiv.org/abs/2104.01136" rel="nofollow">LeViT: A Vision Transformer in ConvNet’s Clothing for Faster Inference</a> by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.</li> <li><strong><a href="model_doc/longformer">Longformer</a></strong> (from AllenAI) released with the paper <a href="https://arxiv.org/abs/2004.05150" rel="nofollow">Longformer: The Long-Document Transformer</a> by Iz Beltagy, Matthew E. 
Peters, Arman Cohan.</li> <li><strong><a href="model_doc/longt5">LongT5</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/2112.07916" rel="nofollow">LongT5: Efficient Text-To-Text Transformer for Long Sequences</a> by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.</li> <li><strong><a href="model_doc/luke">LUKE</a></strong> (from Studio Ousia) released with the paper <a href="https://arxiv.org/abs/2010.01057" rel="nofollow">LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention</a> by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.</li> <li><strong><a href="model_doc/lxmert">LXMERT</a></strong> (from UNC Chapel Hill) released with the paper <a href="https://arxiv.org/abs/1908.07490" rel="nofollow">LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering</a> by Hao Tan and Mohit Bansal.</li> <li><strong><a href="model_doc/mctct">M-CTC-T</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2111.00161" rel="nofollow">Pseudo-Labeling For Massively Multilingual Speech Recognition</a> by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.</li> <li><strong><a href="model_doc/m2m_100">M2M100</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2010.11125" rel="nofollow">Beyond English-Centric Multilingual Machine Translation</a> by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.</li> <li><strong><a href="model_doc/marian">MarianMT</a></strong> Machine translation models trained using <a href="http://opus.nlpl.eu/" rel="nofollow">OPUS</a> data by Jörg Tiedemann. 
The <a href="https://marian-nmt.github.io/" rel="nofollow">Marian Framework</a> is being developed by the Microsoft Translator Team.</li> <li><strong><a href="model_doc/maskformer">MaskFormer</a></strong> (from Meta and UIUC) released with the paper <a href="https://arxiv.org/abs/2107.06278" rel="nofollow">Per-Pixel Classification is Not All You Need for Semantic Segmentation</a> by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.</li> <li><strong><a href="model_doc/mbart">mBART</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2001.08210" rel="nofollow">Multilingual Denoising Pre-training for Neural Machine Translation</a> by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.</li> <li><strong><a href="model_doc/mbart">mBART-50</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2008.00401" rel="nofollow">Multilingual Translation with Extensible Multilingual Pretraining and Finetuning</a> by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.</li> <li><strong><a href="model_doc/megatron-bert">Megatron-BERT</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/1909.08053" rel="nofollow">Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism</a> by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.</li> <li><strong><a href="model_doc/megatron_gpt2">Megatron-GPT2</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/1909.08053" rel="nofollow">Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism</a> by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.</li> <li><strong><a href="model_doc/mluke">mLUKE</a></strong> (from Studio Ousia) released with the paper <a 
href="https://arxiv.org/abs/2110.08151" rel="nofollow">mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models</a> by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.</li> <li><strong><a href="model_doc/mobilebert">MobileBERT</a></strong> (from CMU/Google Brain) released with the paper <a href="https://arxiv.org/abs/2004.02984" rel="nofollow">MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices</a> by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.</li> <li><strong><a href="model_doc/mobilevit">MobileViT</a></strong> (from Apple) released with the paper <a href="https://arxiv.org/abs/2110.02178" rel="nofollow">MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer</a> by Sachin Mehta and Mohammad Rastegari.</li> <li><strong><a href="model_doc/mpnet">MPNet</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2004.09297" rel="nofollow">MPNet: Masked and Permuted Pre-training for Language Understanding</a> by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.</li> <li><strong><a href="model_doc/mt5">MT5</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/2010.11934" rel="nofollow">mT5: A massively multilingual pre-trained text-to-text transformer</a> by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.</li> <li><strong><a href="model_doc/mvp">MVP</a></strong> (from RUC AI Box) released with the paper <a href="https://arxiv.org/abs/2206.12131" rel="nofollow">MVP: Multi-task Supervised Pre-training for Natural Language Generation</a> by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.</li> <li><strong><a href="model_doc/nezha">Nezha</a></strong> (from Huawei Noah’s Ark Lab) released with the paper <a href="https://arxiv.org/abs/1909.00204" rel="nofollow">NEZHA: Neural Contextualized Representation for Chinese Language 
Understanding</a> by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.</li> <li><strong><a href="model_doc/nllb">NLLB</a></strong> (from Meta) released with the paper <a href="https://arxiv.org/abs/2207.04672" rel="nofollow">No Language Left Behind: Scaling Human-Centered Machine Translation</a> by the NLLB team.</li> <li><strong><a href="model_doc/nystromformer">Nyströmformer</a></strong> (from the University of Wisconsin - Madison) released with the paper <a href="https://arxiv.org/abs/2102.03902" rel="nofollow">Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention</a> by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.</li> <li><strong><a href="master/model_doc/opt">OPT</a></strong> (from Meta AI) released with the paper <a href="https://arxiv.org/abs/2205.01068" rel="nofollow">OPT: Open Pre-trained Transformer Language Models</a> by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.</li> <li><strong><a href="model_doc/owlvit">OWL-ViT</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/2205.06230" rel="nofollow">Simple Open-Vocabulary Object Detection with Vision Transformers</a> by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.</li> <li><strong><a href="model_doc/pegasus">Pegasus</a></strong> (from Google) released with the paper <a href="https://arxiv.org/abs/1912.08777" rel="nofollow">PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization</a> by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. 
Liu.</li> <li><strong><a href="model_doc/perceiver">Perceiver IO</a></strong> (from Deepmind) released with the paper <a href="https://arxiv.org/abs/2107.14795" rel="nofollow">Perceiver IO: A General Architecture for Structured Inputs &amp; Outputs</a> by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.</li> <li><strong><a href="model_doc/phobert">PhoBERT</a></strong> (from VinAI Research) released with the paper <a href="https://www.aclweb.org/anthology/2020.findings-emnlp.92/" rel="nofollow">PhoBERT: Pre-trained language models for Vietnamese</a> by Dat Quoc Nguyen and Anh Tuan Nguyen.</li> <li><strong><a href="model_doc/plbart">PLBart</a></strong> (from UCLA NLP) released with the paper <a href="https://arxiv.org/abs/2103.06333" rel="nofollow">Unified Pre-training for Program Understanding and Generation</a> by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.</li> <li><strong><a href="model_doc/poolformer">PoolFormer</a></strong> (from Sea AI Labs) released with the paper <a href="https://arxiv.org/abs/2111.11418" rel="nofollow">MetaFormer is Actually What You Need for Vision</a> by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.</li> <li><strong><a href="model_doc/prophetnet">ProphetNet</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2001.04063" rel="nofollow">ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training</a> by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.</li> <li><strong><a href="model_doc/qdqbert">QDQBert</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/2004.09602" rel="nofollow">Integer Quantization for Deep 
Learning Inference: Principles and Empirical Evaluation</a> by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.</li> <li><strong><a href="model_doc/rag">RAG</a></strong> (from Facebook) released with the paper <a href="https://arxiv.org/abs/2005.11401" rel="nofollow">Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks</a> by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela.</li> <li><strong><a href="model_doc/realm.html">REALM</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2002.08909" rel="nofollow">REALM: Retrieval-Augmented Language Model Pre-Training</a> by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.</li> <li><strong><a href="model_doc/reformer">Reformer</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2001.04451" rel="nofollow">Reformer: The Efficient Transformer</a> by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.</li> <li><strong><a href="model_doc/regnet">RegNet</a></strong> (from META Platforms) released with the paper <a href="https://arxiv.org/abs/2003.13678" rel="nofollow">Designing Network Design Space</a> by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.</li> <li><strong><a href="model_doc/rembert">RemBERT</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2010.12821" rel="nofollow">Rethinking embedding coupling in pre-trained language models</a> by Hyung Won Chung, Thibault Févry, Henry Tsai, M. 
Johnson, Sebastian Ruder.</li> <li><strong><a href="model_doc/resnet">ResNet</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/1512.03385" rel="nofollow">Deep Residual Learning for Image Recognition</a> by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.</li> <li><strong><a href="model_doc/roberta">RoBERTa</a></strong> (from Facebook), released together with the paper <a href="https://arxiv.org/abs/1907.11692" rel="nofollow">RoBERTa: A Robustly Optimized BERT Pretraining Approach</a> by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.</li> <li><strong><a href="model_doc/roformer">RoFormer</a></strong> (from ZhuiyiTechnology), released together with the paper <a href="https://arxiv.org/abs/2104.09864" rel="nofollow">RoFormer: Enhanced Transformer with Rotary Position Embedding</a> by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.</li> <li><strong><a href="model_doc/segformer">SegFormer</a></strong> (from NVIDIA) released with the paper <a href="https://arxiv.org/abs/2105.15203" rel="nofollow">SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers</a> by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.</li> <li><strong><a href="model_doc/sew">SEW</a></strong> (from ASAPP) released with the paper <a href="https://arxiv.org/abs/2109.06870" rel="nofollow">Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition</a> by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.</li> <li><strong><a href="model_doc/sew_d">SEW-D</a></strong> (from ASAPP) released with the paper <a href="https://arxiv.org/abs/2109.06870" rel="nofollow">Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition</a> by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. 
Weinberger, Yoav Artzi.</li> <li><strong><a href="model_doc/speech_to_text">SpeechToTextTransformer</a></strong> (from Facebook), released together with the paper <a href="https://arxiv.org/abs/2010.05171" rel="nofollow">fairseq S2T: Fast Speech-to-Text Modeling with fairseq</a> by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.</li> <li><strong><a href="model_doc/speech_to_text_2">SpeechToTextTransformer2</a></strong> (from Facebook), released together with the paper <a href="https://arxiv.org/abs/2104.06678" rel="nofollow">Large-Scale Self- and Semi-Supervised Learning for Speech Translation</a> by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.</li> <li><strong><a href="model_doc/splinter">Splinter</a></strong> (from Tel Aviv University), released together with the paper <a href="https://arxiv.org/abs/2101.00438" rel="nofollow">Few-Shot Question Answering by Pretraining Span Selection</a> by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.</li> <li><strong><a href="model_doc/squeezebert">SqueezeBERT</a></strong> (from Berkeley) released with the paper <a href="https://arxiv.org/abs/2006.11316" rel="nofollow">SqueezeBERT: What can computer vision teach NLP about efficient neural networks?</a> by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. 
Keutzer.</li> <li><strong><a href="model_doc/swin">Swin Transformer</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2103.14030" rel="nofollow">Swin Transformer: Hierarchical Vision Transformer using Shifted Windows</a> by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.</li> <li><strong><a href="model_doc/swinv2">Swin Transformer V2</a></strong> (from Microsoft) released with the paper <a href="https://arxiv.org/abs/2111.09883" rel="nofollow">Swin Transformer V2: Scaling Up Capacity and Resolution</a> by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.</li> <li><strong><a href="model_doc/t5">T5</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/1910.10683" rel="nofollow">Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer</a> by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.</li> <li><strong><a href="model_doc/t5v1.1">T5v1.1</a></strong> (from Google AI) released in the repository <a href="https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511" rel="nofollow">google-research/text-to-text-transfer-transformer</a> by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. 
Liu.</li> <li><strong><a href="model_doc/tapas">TAPAS</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/2004.02349" rel="nofollow">TAPAS: Weakly Supervised Table Parsing via Pre-training</a> by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.</li> <li><strong><a href="model_doc/tapex">TAPEX</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2107.07653" rel="nofollow">TAPEX: Table Pre-training via Learning a Neural SQL Executor</a> by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.</li> <li><strong><a href="model_doc/trajectory_transformers">Trajectory Transformer</a></strong> (from the University of California at Berkeley) released with the paper <a href="https://arxiv.org/abs/2106.02039" rel="nofollow">Offline Reinforcement Learning as One Big Sequence Modeling Problem</a> by Michael Janner, Qiyang Li, Sergey Levine</li> <li><strong><a href="model_doc/transfo-xl">Transformer-XL</a></strong> (from Google/CMU) released with the paper <a href="https://arxiv.org/abs/1901.02860" rel="nofollow">Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context</a> by Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.</li> <li><strong><a href="model_doc/trocr">TrOCR</a></strong> (from Microsoft), released together with the paper <a href="https://arxiv.org/abs/2109.10282" rel="nofollow">TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models</a> by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.</li> <li><strong><a href="model_doc/ul2">UL2</a></strong> (from Google Research) released with the paper <a href="https://arxiv.org/abs/2205.05131v1" rel="nofollow">Unifying Language Learning Paradigms</a> by Yi Tay, Mostafa Dehghani, Vinh Q. 
Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler</li> <li><strong><a href="model_doc/unispeech">UniSpeech</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2101.07597" rel="nofollow">UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data</a> by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.</li> <li><strong><a href="model_doc/unispeech-sat">UniSpeechSat</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2110.05752" rel="nofollow">UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING</a> by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.</li> <li><strong><a href="model_doc/van">VAN</a></strong> (from Tsinghua University and Nankai University) released with the paper <a href="https://arxiv.org/abs/2202.09741" rel="nofollow">Visual Attention Network</a> by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.</li> <li><strong><a href="model_doc/videomae">VideoMAE</a></strong> (from Multimedia Computing Group, Nanjing University) released with the paper <a href="https://arxiv.org/abs/2203.12602" rel="nofollow">VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training</a> by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.</li> <li><strong><a href="model_doc/vilt">ViLT</a></strong> (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper <a href="https://arxiv.org/abs/2102.03334" rel="nofollow">ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision</a> by Wonjae Kim, Bokyung Son, Ildoo Kim.</li> <li><strong><a href="model_doc/vit">Vision Transformer (ViT)</a></strong> (from Google AI) released with the paper <a href="https://arxiv.org/abs/2010.11929" 
rel="nofollow">An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale</a> by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.</li> <li><strong><a href="model_doc/visual_bert">VisualBERT</a></strong> (from UCLA NLP) released with the paper <a href="https://arxiv.org/pdf/1908.03557" rel="nofollow">VisualBERT: A Simple and Performant Baseline for Vision and Language</a> by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.</li> <li><strong><a href="model_doc/vit_mae">ViTMAE</a></strong> (from Meta AI) released with the paper <a href="https://arxiv.org/abs/2111.06377" rel="nofollow">Masked Autoencoders Are Scalable Vision Learners</a> by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.</li> <li><strong><a href="model_doc/wav2vec2">Wav2Vec2</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2006.11477" rel="nofollow">wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations</a> by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.</li> <li><strong><a href="model_doc/wav2vec2-conformer">Wav2Vec2-Conformer</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2010.05171" rel="nofollow">FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ</a> by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.</li> <li><strong><a href="model_doc/wav2vec2_phoneme">Wav2Vec2Phoneme</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2109.11680" rel="nofollow">Simple and Effective Zero-shot Cross-lingual Phoneme Recognition</a> by Qiantong Xu, Alexei Baevski, Michael Auli.</li> <li><strong><a href="model_doc/wavlm">WavLM</a></strong> (from Microsoft Research) released with the paper <a 
href="https://arxiv.org/abs/2110.13900" rel="nofollow">WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing</a> by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.</li> <li><strong><a href="model_doc/xglm">XGLM</a></strong> (From Facebook AI) released with the paper <a href="https://arxiv.org/abs/2112.10668" rel="nofollow">Few-shot Learning with Multilingual Language Models</a> by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O’Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.</li> <li><strong><a href="model_doc/xlm">XLM</a></strong> (from Facebook) released together with the paper <a href="https://arxiv.org/abs/1901.07291" rel="nofollow">Cross-lingual Language Model Pretraining</a> by Guillaume Lample and Alexis Conneau.</li> <li><strong><a href="model_doc/xlm-prophetnet">XLM-ProphetNet</a></strong> (from Microsoft Research) released with the paper <a href="https://arxiv.org/abs/2001.04063" rel="nofollow">ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training</a> by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.</li> <li><strong><a href="model_doc/xlm-roberta">XLM-RoBERTa</a></strong> (from Facebook AI), released together with the paper <a href="https://arxiv.org/abs/1911.02116" rel="nofollow">Unsupervised Cross-lingual Representation Learning at Scale</a> by Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.</li> <li><strong><a 
href="model_doc/xlm-roberta-xl">XLM-RoBERTa-XL</a></strong> (from Facebook AI), released together with the paper <a href="https://arxiv.org/abs/2105.00572" rel="nofollow">Larger-Scale Transformers for Multilingual Masked Language Modeling</a> by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.</li> <li><strong><a href="model_doc/xlnet">XLNet</a></strong> (from Google/CMU) released with the paper <a href="https://arxiv.org/abs/1906.08237" rel="nofollow">XLNet: Generalized Autoregressive Pretraining for Language Understanding</a> by Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.</li> <li><strong><a href="model_doc/xls_r">XLS-R</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2111.09296" rel="nofollow">XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale</a> by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.</li> <li><strong><a href="model_doc/xlsr_wav2vec2">XLSR-Wav2Vec2</a></strong> (from Facebook AI) released with the paper <a href="https://arxiv.org/abs/2006.13979" rel="nofollow">Unsupervised Cross-Lingual Representation Learning For Speech Recognition</a> by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.</li> <li><strong><a href="model_doc/yolos">YOLOS</a></strong> (from Huazhong University of Science &amp; Technology) released with the paper <a href="https://arxiv.org/abs/2106.00666" rel="nofollow">You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection</a> by Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, Wenyu Liu.</li> <li><strong><a href="model_doc/yoso">YOSO</a></strong> (from the University of Wisconsin - Madison) released with the paper <a 
href="https://arxiv.org/abs/2111.09714" rel="nofollow">You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling</a> by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.</li></ol> <h3 class="relative group"><a id="untersttzte-frameworks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#untersttzte-frameworks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Unterstützte Frameworks </span></h3> <p>Die folgende Tabelle zeigt die derzeitige Unterstützung in der Bibliothek für jedes dieser Modelle, unabhängig davon, ob sie einen Python Tokenizer haben (als “langsam” bezeichnet), ein “schneller” Tokenizer, der von der 🤗 Tokenizers Bibliothek unterstützt wird, ob sie Unterstützung in Jax (via Flax), PyTorch, und/oder TensorFlow haben.</p> <table><thead><tr><th align="center">Model</th> <th align="center">Tokenizer slow</th> <th align="center">Tokenizer fast</th> <th align="center">PyTorch support</th> <th align="center">TensorFlow support</th> <th align="center">Flax Support</th></tr></thead> <tbody><tr><td align="center">ALBERT</td> <td align="center">✅</td> <td 
align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">BART</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">BEiT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">BERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Bert Generation</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">BigBird</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">BigBird-Pegasus</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Blenderbot</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">BlenderbotSmall</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">BLOOM</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">CamemBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">CANINE</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td 
align="center">❌</td></tr> <tr><td align="center">CLIP</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">CodeGen</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ConvBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">ConvNeXT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">CTRL</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">CvT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Data2VecAudio</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Data2VecText</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Data2VecVision</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">DeBERTa</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">DeBERTa-v2</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Decision Transformer</td> <td align="center">❌</td> <td 
align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">DeiT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">DETR</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">DistilBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">DPR</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">DPT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ELECTRA</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Encoder decoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">FairSeq Machine-Translation</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">FlauBERT</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">FLAVA</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">FNet</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> 
<tr><td align="center">Funnel Transformer</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">GLPN</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">GPT Neo</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">GPT NeoX</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">GPT-J</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">GroupViT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Hubert</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">I-BERT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ImageGPT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">LayoutLM</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">LayoutLMv2</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">LayoutLMv3</td> <td align="center">✅</td> <td align="center">✅</td> <td 
align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">LED</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">LeViT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Longformer</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">LongT5</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">LUKE</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">LXMERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">M-CTC-T</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">M2M100</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Marian</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">MaskFormer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">mBART</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Megatron-BERT</td> <td 
align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">MobileBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">MobileViT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">MPNet</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">MT5</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">MVP</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Nezha</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Nyströmformer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">OpenAI GPT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">OpenAI GPT-2</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">OPT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">OWL-ViT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td 
align="center">❌</td></tr> <tr><td align="center">Pegasus</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Perceiver</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">PLBart</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">PoolFormer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ProphetNet</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">QDQBert</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">RAG</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">REALM</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Reformer</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">RegNet</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">RemBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">ResNet</td> <td align="center">❌</td> <td align="center">❌</td> <td 
align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">RetriBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">RoBERTa</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">RoFormer</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">SegFormer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">SEW</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">SEW-D</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Speech Encoder decoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">Speech2Text</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Speech2Text2</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Splinter</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">SqueezeBERT</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td 
align="center">Swin Transformer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Swin Transformer V2</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">T5</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">TAPAS</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Trajectory Transformer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Transformer-XL</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">TrOCR</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">UniSpeech</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">UniSpeechSat</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">VAN</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">VideoMAE</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ViLT</td> <td align="center">❌</td> <td align="center">❌</td> <td 
align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">Vision Encoder decoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">VisionTextDualEncoder</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">VisualBERT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">ViT</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">ViTMAE</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">Wav2Vec2</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">Wav2Vec2-Conformer</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">WavLM</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">XGLM</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td></tr> <tr><td align="center">XLM</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">XLM-ProphetNet</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> 
<tr><td align="center">XLM-RoBERTa</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td></tr> <tr><td align="center">XLM-RoBERTa-XL</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">XLNet</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">✅</td> <td align="center">❌</td></tr> <tr><td align="center">YOLOS</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr> <tr><td align="center">YOSO</td> <td align="center">❌</td> <td align="center">❌</td> <td align="center">✅</td> <td align="center">❌</td> <td align="center">❌</td></tr></tbody></table> <script type="module" data-hydrate="1qjhicw"> import { start } from "/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1qjhicw"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/de","assets":"/docs/transformers/pr_18789/de"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/de/_app/pages/index.mdx-hf-doc-builder.js") ], params: {} } }); </script>
497
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/de/model_sharing.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;ein-modell-teilen&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;repositoryfunktionen&quot;,&quot;title&quot;:&quot;Repository-Funktionen&quot;},{&quot;local&quot;:&quot;einrichtung&quot;,&quot;title&quot;:&quot;Einrichtung&quot;},{&quot;local&quot;:&quot;ein-modell-fr-alle-frameworks-konvertieren&quot;,&quot;title&quot;:&quot;Ein Modell für alle Frameworks konvertieren&quot;},{&quot;local&quot;:&quot;ein-modell-whrend-des-trainings-hochladen&quot;,&quot;title&quot;:&quot;Ein Modell während des Trainings hochladen&quot;},{&quot;local&quot;:&quot;verwenden-sie-die-funktion-pushtohub&quot;,&quot;title&quot;:&quot;Verwenden Sie die Funktion `push_to_hub`.&quot;},{&quot;local&quot;:&quot;hochladen-mit-der-weboberflche&quot;,&quot;title&quot;:&quot;Hochladen mit der Weboberfläche&quot;},{&quot;local&quot;:&quot;hinzufgen-einer-modellkarte&quot;,&quot;title&quot;:&quot;Hinzufügen einer Modellkarte&quot;}],&quot;title&quot;:&quot;Ein Modell teilen&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/model_sharing.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/Tip-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/Youtube-hf-doc-builder.js"> <link 
rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/CodeBlock-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/Markdown-hf-doc-builder.js"> <h1 class="relative group"><a id="ein-modell-teilen" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#ein-modell-teilen"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Ein Modell teilen </span></h1> <p>Die letzten beiden Tutorials haben gezeigt, wie man ein Modell mit PyTorch, Keras und 🤗 Accelerate für verteilte Setups feinabstimmen kann. Der nächste Schritt besteht darin, Ihr Modell mit der Community zu teilen! Bei Hugging Face glauben wir an den offenen Austausch von Wissen und Ressourcen, um künstliche Intelligenz für alle zu demokratisieren. 
Wir ermutigen Sie, Ihr Modell mit der Community zu teilen, um anderen zu helfen, Zeit und Ressourcen zu sparen.</p> <p>In diesem Tutorial lernen Sie zwei Methoden kennen, wie Sie ein trainiertes oder verfeinertes Modell auf dem <a href="https://huggingface.co/models" rel="nofollow">Model Hub</a> teilen können:</p> <ul><li>Programmgesteuertes Übertragen Ihrer Dateien auf den Hub.</li> <li>Ziehen Sie Ihre Dateien per Drag-and-Drop über die Weboberfläche in den Hub.</li></ul> <iframe width="560" height="315" src="https://www.youtube.com/embed/XvSGPZFEjDY" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p>Um ein Modell mit der Öffentlichkeit zu teilen, benötigen Sie ein Konto auf <a href="https://huggingface.co/join" rel="nofollow">huggingface.co</a>. 
Sie können auch einer bestehenden Organisation beitreten oder eine neue Organisation gründen.</p></div> <h2 class="relative group"><a id="repositoryfunktionen" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#repositoryfunktionen"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Repository-Funktionen </span></h2> <p>Jedes Repository im Model Hub verhält sich wie ein typisches GitHub-Repository. Unsere Repositorys bieten Versionierung, Commit-Historie und die Möglichkeit, Unterschiede zu visualisieren.</p> <p>Die integrierte Versionierung des Model Hub basiert auf Git und <a href="https://git-lfs.github.com/" rel="nofollow">git-lfs</a>. Mit anderen Worten: Sie können ein Modell als ein Repository behandeln, was eine bessere Zugriffskontrolle und Skalierbarkeit ermöglicht. 
Die Versionskontrolle ermöglicht <em>Revisionen</em>, eine Methode zum Anheften einer bestimmten Version eines Modells mit einem Commit-Hash, Tag oder Branch.</p> <p>Folglich können Sie eine bestimmte Modellversion mit dem Parameter “Revision” laden:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;julien-c/EsperBERTo-small&quot;</span>, revision=<span class="hljs-string">&quot;v2.0.1&quot;</span> <span class="hljs-comment"># tag name, or branch name, or commit hash</span> <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Dateien lassen sich auch in einem Repository leicht bearbeiten, und Sie können die Commit-Historie sowie die Unterschiede einsehen:</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png" alt="vis_diff"></p> <h2 class="relative group"><a id="einrichtung" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#einrichtung"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Einrichtung </span></h2> <p>Bevor Sie ein Modell für den Hub freigeben, benötigen Sie Ihre Hugging Face-Anmeldedaten. Wenn Sie Zugang zu einem Terminal haben, führen Sie den folgenden Befehl in der virtuellen Umgebung aus, in der 🤗 Transformers installiert ist. 
Dadurch werden Ihre Zugangsdaten in Ihrem Hugging Face-Cache-Ordner (standardmäßig <code>~/.cache/</code>) gespeichert:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->huggingface-cli login<!-- HTML_TAG_END --></pre></div> <p>Wenn Sie ein Notebook wie Jupyter oder Colaboratory verwenden, stellen Sie sicher, dass Sie die <a href="https://huggingface.co/docs/hub/adding-a-library" rel="nofollow"><code>huggingface_hub</code></a> Bibliothek installiert haben. 
Diese Bibliothek ermöglicht Ihnen die programmatische Interaktion mit dem Hub.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install huggingface_hub<!-- HTML_TAG_END --></pre></div> <p>Verwenden Sie dann <code>notebook_login</code>, um sich beim Hub anzumelden, und folgen Sie dem Link <a href="https://huggingface.co/settings/token" rel="nofollow">hier</a>, um ein Token für die Anmeldung zu generieren:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> notebook_login <span class="hljs-meta">&gt;&gt;&gt; </span>notebook_login()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="ein-modell-fr-alle-frameworks-konvertieren" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#ein-modell-fr-alle-frameworks-konvertieren"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 
11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Ein Modell für alle Frameworks konvertieren </span></h2> <p>Um sicherzustellen, dass Ihr Modell von jemandem verwendet werden kann, der mit einem anderen Framework arbeitet, empfehlen wir Ihnen, Ihr Modell sowohl mit PyTorch- als auch mit TensorFlow-Checkpoints zu konvertieren und hochzuladen. Während Benutzer immer noch in der Lage sind, Ihr Modell von einem anderen Framework zu laden, wenn Sie diesen Schritt überspringen, wird es langsamer sein, weil 🤗 Transformers den Checkpoint on-the-fly konvertieren müssen.</p> <p>Die Konvertierung eines Checkpoints für ein anderes Framework ist einfach. Stellen Sie sicher, dass Sie PyTorch und TensorFlow installiert haben (siehe <a href="installation">hier</a> für Installationsanweisungen), und finden Sie dann das spezifische Modell für Ihre Aufgabe in dem anderen Framework. </p> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" 
fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch content</span></div></div> <div class="framework-content"> <p>Geben Sie <code>from_tf=True</code> an, um einen Prüfpunkt von TensorFlow nach PyTorch zu konvertieren:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_model = DistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>, from_tf=<span class="hljs-literal">True</span>) <span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.save_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>)<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 
126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Geben Sie <code>from_pt=True</code> an, um einen Prüfpunkt von PyTorch nach TensorFlow zu konvertieren:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" 
xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_model = TFDistilBertForSequenceClassification.from_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>, from_pt=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Dann können Sie Ihr neues TensorFlow-Modell mit seinem neuen Checkpoint speichern:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" 
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_model.save_pretrained(<span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>)<!-- HTML_TAG_END --></pre></div> </div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1.73em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 451 260.81"><style>.J { stroke: #dce0df; } .K { stroke-linejoin: round; } </style><g fill="#5e97f6" class="J K"><path d="M50.5 130.4l-25 43.31h50l25-43.31h-50z"></path><path d="M.5 217.01l25-43.3h50l-25 43.3H.5z"></path><path d="M125.5 173.71h-50l-25 43.3h50l25-43.3z"></path><path d="M175.5 173.71h-50l-25 43.3h50l25-43.3z"></path><path d="M150.5 130.4l-25 43.31h50l25-43.31h-50z"></path><path d="M175.5 87.1l-25 43.3h50l25-43.3h-50z"></path><path d="M200.5 43.8l-25 43.3h50l25-43.3h-50z"></path><path d="M225.5.5l-25 43.3h50l25-43.3h-50z"></path></g><g fill="#2a56c6" class="J K"><path d="M.5 217.01l25 43.3h50l-25-43.3H.5z"></path><path d="M125.5 260.31h-50l-25-43.3h50l25 43.3z"></path><path d="M175.5 260.31h-50l-25-43.3h50l25 43.3z"></path></g><g fill="#00796b" class="J K"><path d="M200.5 217.01l-25-43.3-25 43.3 25 43.3 25-43.3zm50-86.61l-25-43.3-25 43.3h50z"></path><path d="M250.5 43.8l-25 43.3 25 43.3 
25-43.3-25-43.3z"></path></g><path d="M125.5 173.71l-25-43.31-25 43.31h50z" fill="#3367d6" class="J K"></path><g fill="#26a69a" class="J K"><path d="M250.5 130.4h-50l-25 43.31h50l25-43.31z"></path><path d="M300.5 130.4h-50l-25 43.31h50l25-43.31z"></path></g><g fill="#9c27b0" class="J K"><path d="M350.5 43.8L325.5.5l-25 43.3 25 43.3 25-43.3z"></path><path d="M375.5 87.1l-25-43.3-25 43.3 25 43.3 25-43.3z"></path><path d="M400.5 130.4l-25-43.3-25 43.3 25 43.31 25-43.31z"></path><path d="M425.5 173.71l-25-43.31-25 43.31 25 43.3 25-43.3z"></path><path d="M450.5 217.01l-25-43.3-25 43.3 25 43.3 25-43.3zM425.5.5l-25 43.3 25 43.3 25-43.3-25-43.3z"></path><path d="M375.5 87.1l25-43.3 25 43.3-25 43.3-25-43.3zm-25 43.3l-25 43.31 25 43.3 25-43.3-25-43.31z"></path><path d="M325.5 260.31l-25-43.3 25-43.3 25 43.3-25 43.3z"></path></g><path d="M275.5 260.31l-25-43.3h50l25 43.3h-50z" fill="#6a1b9a" class="J K"></path><g fill="#00695c" class="J K"><path d="M225.5 173.71h-50l25 43.3h50l-25-43.3z"></path><path d="M275.5 173.71h-50l25 43.3 25-43.3zm0-86.61l25 43.3h50l-25-43.3h-50z"></path><path d="M300.5 43.8h-50l25 43.3h50l-25-43.3zm125 216.51l-25-43.3h-50l25 43.3h50z"></path><path d="M375.5 173.71l-25 43.3h50l-25-43.3z"></path></g><g fill="#ea80fc" class="J K"><path d="M325.5.5h-50l-25 43.3h50l25-43.3zm0 173.21h-50l-25 43.3h50l25-43.3z"></path><path d="M350.5 130.4h-50l-25 43.31h50l25-43.31zM425.5.5h-50l-25 43.3h50l25-43.3z"></path><path d="M375.5 87.1l-25-43.3h50l-25 43.3z"></path></g></svg> <span>JAX</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 
2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide JAX content</span></div></div> <div class="framework-content"> <p>Wenn ein Modell in Flax verfügbar ist, können Sie auch einen Kontrollpunkt von PyTorch nach Flax konvertieren:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 
translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( <span class="hljs-meta">... </span> <span class="hljs-string">&quot;path/to/awesome-name-you-picked&quot;</span>, from_pt=<span class="hljs-literal">True</span> <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> </div></div></div> <h2 class="relative group"><a id="ein-modell-whrend-des-trainings-hochladen" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#ein-modell-whrend-des-trainings-hochladen"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Ein Modell während des Trainings hochladen </span></h2> <div class="space-y-10 py-6 2xl:py-8 2xl:-mx-4"> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white 
dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><defs><clipPath id="a"><rect x="3.05" y="0.5" width="25.73" height="31" fill="none"></rect></clipPath></defs><g clip-path="url(#a)"><path d="M24.94,9.51a12.81,12.81,0,0,1,0,18.16,12.68,12.68,0,0,1-18,0,12.81,12.81,0,0,1,0-18.16l9-9V5l-.84.83-6,6a9.58,9.58,0,1,0,13.55,0ZM20.44,9a1.68,1.68,0,1,1,1.67-1.67A1.68,1.68,0,0,1,20.44,9Z" fill="#ee4c2c"></path></g></svg> <span>Pytorch</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide Pytorch 
content</span></div></div> <div class="framework-content"> <iframe class="w-full xl:w-4/6 h-80" src="https://www.youtube-nocookie.com/embed/Z1-XMy-GNLQ" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <p>Die Weitergabe eines Modells an den Hub ist so einfach wie das Hinzufügen eines zusätzlichen Parameters oder Rückrufs. Erinnern Sie sich an das <a href="training">Feinabstimmungs-Tutorial</a>, in der Klasse <code>TrainingArguments</code> geben Sie Hyperparameter und zusätzliche Trainingsoptionen an. Eine dieser Trainingsoptionen beinhaltet die Möglichkeit, ein Modell direkt an den Hub zu pushen. Setzen Sie <code>push_to_hub=True</code> in Ihrer <code>TrainingArguments</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- 
HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>training_args = TrainingArguments(output_dir=<span class="hljs-string">&quot;my-awesome-model&quot;</span>, push_to_hub=<span class="hljs-literal">True</span>)<!-- HTML_TAG_END --></pre></div> <p>Übergeben Sie Ihre Trainingsargumente wie gewohnt an <code>Trainer</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>trainer = Trainer( <span class="hljs-meta">... </span> model=model, <span class="hljs-meta">... </span> args=training_args, <span class="hljs-meta">... </span> train_dataset=small_train_dataset, <span class="hljs-meta">... </span> eval_dataset=small_eval_dataset, <span class="hljs-meta">... </span> compute_metrics=compute_metrics, <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <p>Nach der Feinabstimmung Ihres Modells rufen Sie <code>push_to_hub()</code> auf <code>Trainer</code> auf, um das trainierte Modell an den Hub zu übertragen. Transformers fügt sogar automatisch Trainings-Hyperparameter, Trainingsergebnisse und Framework-Versionen zu Ihrer Modellkarte hinzu!</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>trainer.push_to_hub()<!-- HTML_TAG_END --></pre></div></div></div> <div class="border border-gray-200 rounded-xl px-4 relative"><div class="flex h-[22px] mt-[-12.5px] justify-between leading-none"><div class="flex px-1 items-center space-x-1 bg-white dark:bg-gray-950"><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" 
width="0.94em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 274"><path d="M145.726 42.065v42.07l72.861 42.07v-42.07l-72.86-42.07zM0 84.135v42.07l36.43 21.03V105.17L0 84.135zm109.291 21.035l-36.43 21.034v126.2l36.43 21.035v-84.135l36.435 21.035v-42.07l-36.435-21.034V105.17z" fill="#E55B2D"></path><path d="M145.726 42.065L36.43 105.17v42.065l72.861-42.065v42.065l36.435-21.03v-84.14zM255.022 63.1l-36.435 21.035v42.07l36.435-21.035V63.1zm-72.865 84.135l-36.43 21.035v42.07l36.43-21.036v-42.07zm-36.43 63.104l-36.436-21.035v84.135l36.435-21.035V210.34z" fill="#ED8E24"></path><path d="M145.726 0L0 84.135l36.43 21.035l109.296-63.105l72.861 42.07L255.022 63.1L145.726 0zm0 126.204l-36.435 21.03l36.435 21.036l36.43-21.035l-36.43-21.03z" fill="#F8BF3C"></path></svg> <span>TensorFlow</span></div> <div class="cursor-pointer flex items-center justify-center space-x-1 text-sm px-2 bg-white dark:bg-gray-950 hover:underline leading-none"><svg class="" width="0.9em" height="0.9em" viewBox="0 0 10 9" fill="currentColor" xmlns="http://www.w3.org/2000/svg"><path d="M1.39125 1.9725L0.0883333 0.669997L0.677917 0.0804138L8.9275 8.33041L8.33792 8.91958L6.95875 7.54041C6.22592 8.00523 5.37572 8.25138 4.50792 8.25C2.26125 8.25 0.392083 6.63333 0 4.5C0.179179 3.52946 0.667345 2.64287 1.39167 1.9725H1.39125ZM5.65667 6.23833L5.04667 5.62833C4.81335 5.73996 4.55116 5.77647 4.29622 5.73282C4.04129 5.68918 3.80617 5.56752 3.62328 5.38463C3.44039 5.20175 3.31874 4.96663 3.27509 4.71169C3.23144 4.45676 3.26795 4.19456 3.37958 3.96125L2.76958 3.35125C2.50447 3.75187 2.38595 4.2318 2.4341 4.70978C2.48225 5.18777 2.6941 5.63442 3.0338 5.97411C3.37349 6.31381 3.82015 6.52567 4.29813 6.57382C4.77611 6.62197 5.25605 6.50345 5.65667 6.23833ZM2.83042 1.06666C3.35 0.862497 3.91625 0.749997 4.50792 0.749997C6.75458 0.749997 8.62375 2.36666 9.01583 4.5C8.88816 5.19404 8.60119 5.84899 8.1775 6.41333L6.56917 4.805C6.61694 4.48317 6.58868 4.15463 6.48664 3.84569C6.3846 3.53675 6.21162 3.256 
5.98156 3.02594C5.7515 2.79588 5.47075 2.6229 5.16181 2.52086C4.85287 2.41882 4.52433 2.39056 4.2025 2.43833L2.83042 1.06708V1.06666Z" fill="currentColor"></path></svg> <span>Hide TensorFlow content</span></div></div> <div class="framework-content"> <p>Geben Sie ein Modell mit <code>PushToHubCallback</code> an den Hub weiter. In der <code>PushToHubCallback</code> Funktion, fügen Sie hinzu:</p> <ul><li>Ein Ausgabeverzeichnis für Ihr Modell.</li> <li>Einen Tokenizer.</li> <li>Die <code>hub_model_id</code>, die Ihr Hub-Benutzername und Modellname ist.</li></ul> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers.keras.callbacks <span class="hljs-keyword">import</span> PushToHubCallback <span class="hljs-meta">&gt;&gt;&gt; </span>push_to_hub_callback = 
PushToHubCallback( <span class="hljs-meta">... </span> output_dir=<span class="hljs-string">&quot;./your_model_save_path&quot;</span>, tokenizer=tokenizer, hub_model_id=<span class="hljs-string">&quot;your-username/my-awesome-model&quot;</span> <span class="hljs-meta">... </span>)<!-- HTML_TAG_END --></pre></div> <p>Fügen Sie den Callback zu <a href="https://keras.io/api/models/model_training_apis/" rel="nofollow"><code>fit</code></a> hinzu, und 🤗 Transformers wird das trainierte Modell an den Hub weiterleiten:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=<span class="hljs-number">3</span>, callbacks=push_to_hub_callback)<!-- HTML_TAG_END --></pre></div> </div></div> </div> <h2 class="relative group"><a 
id="verwenden-sie-die-funktion-pushtohub" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#verwenden-sie-die-funktion-pushtohub"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Verwenden Sie die Funktion <code>push_to_hub</code>. 
</span></h2> <p>Sie können <code>push_to_hub</code> auch direkt für Ihr Modell aufrufen, um es in den Hub hochzuladen.</p> <p>Geben Sie den Namen Ihres Modells in “push_to_hub” an:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.push_to_hub(<span class="hljs-string">&quot;my-awesome-model&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Dadurch wird ein Repository unter Ihrem Benutzernamen mit dem Modellnamen <code>my-awesome-model</code> erstellt. 
Benutzer können nun Ihr Modell mit der Funktion <code>from_pretrained</code> laden:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModel <span class="hljs-meta">&gt;&gt;&gt; </span>model = AutoModel.from_pretrained(<span class="hljs-string">&quot;your_username/my-awesome-model&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Wenn Sie zu einer Organisation gehören und Ihr Modell stattdessen unter dem Namen der Organisation pushen wollen, fügen Sie diesen einfach zur <code>repo_id</code> hinzu:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition 
duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>pt_model.push_to_hub(<span class="hljs-string">&quot;my-awesome-org/my-awesome-model&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Die Funktion “push_to_hub” kann auch verwendet werden, um andere Dateien zu einem Modell-Repository hinzuzufügen. 
Zum Beispiel kann man einen Tokenizer zu einem Modell-Repository hinzufügen:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tokenizer.push_to_hub(<span class="hljs-string">&quot;my-awesome-model&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Oder vielleicht möchten Sie die TensorFlow-Version Ihres fein abgestimmten PyTorch-Modells hinzufügen:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>tf_model.push_to_hub(<span class="hljs-string">&quot;my-awesome-model&quot;</span>)<!-- HTML_TAG_END --></pre></div> <p>Wenn Sie nun zu Ihrem Hugging Face-Profil navigieren, sollten Sie Ihr neu erstelltes Modell-Repository sehen. 
Wenn Sie auf die Registerkarte <strong>Dateien</strong> klicken, werden alle Dateien angezeigt, die Sie in das Repository hochgeladen haben.</p> <p>Weitere Einzelheiten zum Erstellen und Hochladen von Dateien in ein Repository finden Sie in der Hub-Dokumentation <a href="https://huggingface.co/docs/hub/how-to-upstream" rel="nofollow">hier</a>.</p> <h2 class="relative group"><a id="hochladen-mit-der-weboberflche" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#hochladen-mit-der-weboberflche"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Hochladen mit der Weboberfläche </span></h2> <p>Benutzer, die einen no-code Ansatz bevorzugen, können ein Modell über das Webinterface des Hubs hochladen. Besuchen Sie <a href="https://huggingface.co/new" rel="nofollow">huggingface.co/new</a> um ein neues Repository zu erstellen:</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png" alt="new_model_repo"></p> <p>Fügen Sie von hier aus einige Informationen über Ihr Modell hinzu:</p> <ul><li>Wählen Sie den <strong>Besitzer</strong> des Repositorys. 
Dies können Sie selbst oder eine der Organisationen sein, denen Sie angehören.</li> <li>Wählen Sie einen Namen für Ihr Modell, der auch der Name des Repositorys sein wird.</li> <li>Wählen Sie, ob Ihr Modell öffentlich oder privat ist.</li> <li>Geben Sie die Lizenzverwendung für Ihr Modell an.</li></ul> <p>Klicken Sie nun auf die Registerkarte <strong>Dateien</strong> und klicken Sie auf die Schaltfläche <strong>Datei hinzufügen</strong>, um eine neue Datei in Ihr Repository hochzuladen. Ziehen Sie dann eine Datei per Drag-and-Drop hoch und fügen Sie eine Übergabemeldung hinzu.</p> <p><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png" alt="upload_file"></p> <h2 class="relative group"><a id="hinzufgen-einer-modellkarte" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#hinzufgen-einer-modellkarte"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Hinzufügen einer Modellkarte </span></h2> <p>Um sicherzustellen, dass die Benutzer die Fähigkeiten, Grenzen, möglichen Verzerrungen und ethischen Aspekte Ihres Modells verstehen, fügen Sie bitte eine Modellkarte zu Ihrem Repository 
hinzu. Die Modellkarte wird in der Datei <code>README.md</code> definiert. Sie können eine Modellkarte hinzufügen, indem Sie:</p> <ul><li>Manuelles Erstellen und Hochladen einer “README.md”-Datei.</li> <li>Klicken Sie auf die Schaltfläche <strong>Modellkarte bearbeiten</strong> in Ihrem Modell-Repository.</li></ul> <p>Werfen Sie einen Blick auf die DistilBert <a href="https://huggingface.co/distilbert-base-uncased" rel="nofollow">model card</a> als gutes Beispiel für die Art von Informationen, die eine Modellkarte enthalten sollte. Weitere Details über andere Optionen, die Sie in der Datei “README.md” einstellen können, wie z.B. den Kohlenstoff-Fußabdruck eines Modells oder Beispiele für Widgets, finden Sie in der Dokumentation <a href="https://huggingface.co/docs/hub/models-cards" rel="nofollow">hier</a>.</p> <script type="module" data-hydrate="1agzkqq"> import { start } from "/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"; start({ target: document.querySelector('[data-hydrate="1agzkqq"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/de","assets":"/docs/transformers/pr_18789/de"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/de/_app/pages/model_sharing.mdx-hf-doc-builder.js") ], params: {} } }); </script>
498
0
hf_public_repos/doc-build-dev/transformers/pr_18789
hf_public_repos/doc-build-dev/transformers/pr_18789/de/accelerate.html
<meta charset="utf-8" /><meta http-equiv="content-security-policy" content=""><meta name="hf:doc:metadata" content="{&quot;local&quot;:&quot;verteiltes-training-mit-accelerate&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;einrichtung&quot;,&quot;title&quot;:&quot;Einrichtung&quot;},{&quot;local&quot;:&quot;vorbereiten-auf-die-beschleunigung&quot;,&quot;title&quot;:&quot;Vorbereiten auf die Beschleunigung&quot;},{&quot;local&quot;:&quot;rckwrts&quot;,&quot;title&quot;:&quot;Rückwärts&quot;},{&quot;local&quot;:&quot;trainieren&quot;,&quot;sections&quot;:[{&quot;local&quot;:&quot;trainieren-mit-einem-skript&quot;,&quot;title&quot;:&quot;Trainieren mit einem Skript&quot;},{&quot;local&quot;:&quot;trainieren-mit-einem-notebook&quot;,&quot;title&quot;:&quot;Trainieren mit einem Notebook&quot;}],&quot;title&quot;:&quot;Trainieren&quot;}],&quot;title&quot;:&quot;Verteiltes Training mit 🤗 Accelerate&quot;}" data-svelte="svelte-1phssyn"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/assets/pages/__layout.svelte-hf-doc-builder.css"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/vendor-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/paths-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/pages/accelerate.mdx-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/IconCopyLink-hf-doc-builder.js"> <link rel="modulepreload" href="/docs/transformers/pr_18789/de/_app/chunks/CodeBlock-hf-doc-builder.js"> <h1 class="relative group"><a id="verteiltes-training-mit-accelerate" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 
with-hover:group-hover:opacity-100 with-hover:right-full" href="#verteiltes-training-mit-accelerate"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Verteiltes Training mit 🤗 Accelerate </span></h1> <p>Da die Modelle immer größer werden, hat sich die Parallelität als Strategie zum Trainieren größerer Modelle auf begrenzter Hardware und zur Beschleunigung der Trainingsgeschwindigkeit um mehrere Größenordnungen erwiesen. Bei Hugging Face haben wir die Bibliothek <a href="https://huggingface.co/docs/accelerate" rel="nofollow">🤗 Accelerate</a> entwickelt, um Nutzern zu helfen, ein 🤗 Transformers-Modell auf jeder Art von verteiltem Setup zu trainieren, egal ob es sich um mehrere GPUs auf einer Maschine oder mehrere GPUs auf mehreren Maschinen handelt. 
In diesem Tutorial lernen Sie, wie Sie Ihre native PyTorch-Trainingsschleife anpassen, um das Training in einer verteilten Umgebung zu ermöglichen.</p> <h2 class="relative group"><a id="einrichtung" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#einrichtung"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Einrichtung </span></h2> <p>Beginnen Sie mit der Installation von 🤗 Accelerate:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> 
<div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->pip install accelerate<!-- HTML_TAG_END --></pre></div> <p>Dann importieren und erstellen Sie ein <code>Accelerator</code>-Objekt. Der <code>Accelerator</code> wird automatisch Ihre Art der verteilten Einrichtung erkennen und alle notwendigen Komponenten für das Training initialisieren. Sie müssen Ihr Modell nicht explizit auf einem Gerät platzieren.</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START 
--><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> accelerate <span class="hljs-keyword">import</span> Accelerator <span class="hljs-meta">&gt;&gt;&gt; </span>accelerator = Accelerator()<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="vorbereiten-auf-die-beschleunigung" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#vorbereiten-auf-die-beschleunigung"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Vorbereiten auf die Beschleunigung </span></h2> <p>Der nächste Schritt ist die Übergabe aller relevanten Trainingsobjekte an die Methode <code>prepare</code>. 
Dazu gehören Ihre Trainings- und Evaluierungs-DataLoader, ein Modell und ein Optimierer:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span>train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( <span class="hljs-meta">... </span> train_dataloader, eval_dataloader, model, optimizer <span class="hljs-meta">... 
</span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="rckwrts" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#rckwrts"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Rückwärts </span></h2> <p>Die letzte Ergänzung besteht darin, das typische <code>loss.backward()</code> in der Trainingsschleife durch die 🤗 Accelerate-Methode <code>backward</code> zu ersetzen:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div 
class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">for</span> epoch <span class="hljs-keyword">in</span> <span class="hljs-built_in">range</span>(num_epochs): <span class="hljs-meta">... </span> <span class="hljs-keyword">for</span> batch <span class="hljs-keyword">in</span> train_dataloader: <span class="hljs-meta">... </span> outputs = model(**batch) <span class="hljs-meta">... </span> loss = outputs.loss <span class="hljs-meta">... </span> accelerator.backward(loss) <span class="hljs-meta">... </span> optimizer.step() <span class="hljs-meta">... </span> lr_scheduler.step() <span class="hljs-meta">... </span> optimizer.zero_grad() <span class="hljs-meta">... 
</span> progress_bar.update(<span class="hljs-number">1</span>)<!-- HTML_TAG_END --></pre></div> <p>Wie Sie im folgenden Code sehen können, müssen Sie nur vier zusätzliche Codezeilen zu Ihrer Trainingsschleife hinzufügen, um verteiltes Training zu ermöglichen!</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-addition">+ from accelerate import Accelerator</span> from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler <span class="hljs-addition">+ accelerator = Accelerator()</span> model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) <span class="hljs-deletion">- device = torch.device(&quot;cuda&quot;) if torch.cuda.is_available() else torch.device(&quot;cpu&quot;)</span> <span class="hljs-deletion">- 
model.to(device)</span> <span class="hljs-addition">+ train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(</span> <span class="hljs-addition">+ train_dataloader, eval_dataloader, model, optimizer</span> <span class="hljs-addition">+ )</span> num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( &quot;linear&quot;, optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: <span class="hljs-deletion">- batch = {k: v.to(device) for k, v in batch.items()}</span> outputs = model(**batch) loss = outputs.loss <span class="hljs-deletion">- loss.backward()</span> <span class="hljs-addition">+ accelerator.backward(loss)</span> optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="trainieren" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trainieren"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Trainieren </span></h2> 
<p>Sobald Sie die entsprechenden Codezeilen hinzugefügt haben, starten Sie Ihr Training in einem Skript oder einem Notebook wie Colaboratory.</p> <h3 class="relative group"><a id="trainieren-mit-einem-skript" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trainieren-mit-einem-skript"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Trainieren mit einem Skript </span></h3> <p>Wenn Sie Ihr Training mit einem Skript durchführen, führen Sie den folgenden Befehl aus, um eine Konfigurationsdatei zu erstellen und zu speichern:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" 
transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->accelerate config<!-- HTML_TAG_END --></pre></div> <p>Dann starten Sie Ihr Training mit:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START -->accelerate launch train.py<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a 
id="trainieren-mit-einem-notebook" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trainieren-mit-einem-notebook"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Trainieren mit einem Notebook </span></h3> <p>🤗 Accelerate kann auch in einem Notebook laufen, wenn Sie planen, die TPUs von Colaboratory zu verwenden. 
Verpacken Sie den gesamten Code, der für das Training verantwortlich ist, in eine Funktion und übergeben Sie diese an <code>notebook_launcher</code>:</p> <div class="code-block relative"><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre><!-- HTML_TAG_START --><span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> accelerate <span class="hljs-keyword">import</span> notebook_launcher <span class="hljs-meta">&gt;&gt;&gt; </span>notebook_launcher(training_function)<!-- HTML_TAG_END --></pre></div> <p>Weitere Informationen über 🤗 Accelerate und seine umfangreichen Funktionen finden Sie in der <a href="https://huggingface.co/docs/accelerate" rel="nofollow">Dokumentation</a>.</p> <script type="module" data-hydrate="1x25wlh"> import { start } from "/docs/transformers/pr_18789/de/_app/start-hf-doc-builder.js"; start({ target: 
document.querySelector('[data-hydrate="1x25wlh"]').parentNode, paths: {"base":"/docs/transformers/pr_18789/de","assets":"/docs/transformers/pr_18789/de"}, session: {}, route: false, spa: false, trailing_slash: "never", hydrate: { status: 200, error: null, nodes: [ import("/docs/transformers/pr_18789/de/_app/pages/__layout.svelte-hf-doc-builder.js"), import("/docs/transformers/pr_18789/de/_app/pages/accelerate.mdx-hf-doc-builder.js") ], params: {} } }); </script>
499